Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 09:22:44 +00:00)
Merge pull request #120911 from gjkim42/devicemanager-remove-deprecated-sets-string
pkg/kubelet/cm: Remove deprecated sets.String and sets.Int
Commit: 0de29e1d43
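The change is mechanical throughout: the deprecated, type-specific sets.String/sets.NewString (and sets.Int/sets.NewInt) from k8s.io/apimachinery/pkg/util/sets are replaced by the generic sets.Set[T] and sets.New[T]. A minimal before/after sketch, assuming only that the k8s.io/apimachinery module is available:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Before (deprecated, string-only):
	//   s := sets.NewString("cpu", "memory")
	//   sorted := s.List() // sorted []string via a method

	// After (generic):
	s := sets.New("cpu", "memory") // element type inferred as string
	s.Insert("pids")

	fmt.Println(sets.List(s)) // sorted: [cpu memory pids]
	fmt.Println(s.Has("cpu")) // true
}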
@@ -801,7 +801,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 			NodeAllocatableConfig: cm.NodeAllocatableConfig{
 				KubeReservedCgroupName: s.KubeReservedCgroup,
 				SystemReservedCgroupName: s.SystemReservedCgroup,
-				EnforceNodeAllocatable: sets.NewString(s.EnforceNodeAllocatable...),
+				EnforceNodeAllocatable: sets.New(s.EnforceNodeAllocatable...),
 				KubeReserved: kubeReserved,
 				SystemReserved: systemReserved,
 				ReservedSystemCPUs: reservedSystemCPUs,
@@ -246,7 +246,7 @@ func (m *cgroupManagerImpl) Validate(name CgroupName) error {
 		}
 		difference := neededControllers.Difference(enabledControllers)
 		if difference.Len() > 0 {
-			return fmt.Errorf("cgroup %q has some missing controllers: %v", name, strings.Join(difference.List(), ", "))
+			return fmt.Errorf("cgroup %q has some missing controllers: %v", name, strings.Join(sets.List(difference), ", "))
 		}
 		return nil // valid V2 cgroup
 	}
@@ -260,7 +260,7 @@ func (m *cgroupManagerImpl) Validate(name CgroupName) error {
 	// scoped to the set control groups it understands. this is being discussed
 	// in https://github.com/opencontainers/runc/issues/1440
 	// once resolved, we can remove this code.
-	allowlistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids")
+	allowlistControllers := sets.New[string]("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids")

 	if _, ok := m.subsystems.MountPoints["hugetlb"]; ok {
 		allowlistControllers.Insert("hugetlb")
@@ -322,24 +322,24 @@ func getCPUWeight(cpuShares *uint64) uint64 {
 }

 // readUnifiedControllers reads the controllers available at the specified cgroup
-func readUnifiedControllers(path string) (sets.String, error) {
+func readUnifiedControllers(path string) (sets.Set[string], error) {
 	controllersFileContent, err := os.ReadFile(filepath.Join(path, "cgroup.controllers"))
 	if err != nil {
 		return nil, err
 	}
 	controllers := strings.Fields(string(controllersFileContent))
-	return sets.NewString(controllers...), nil
+	return sets.New(controllers...), nil
 }

 var (
 	availableRootControllersOnce sync.Once
-	availableRootControllers     sets.String
+	availableRootControllers     sets.Set[string]
 )

 // getSupportedUnifiedControllers returns a set of supported controllers when running on cgroup v2
-func getSupportedUnifiedControllers() sets.String {
+func getSupportedUnifiedControllers() sets.Set[string] {
 	// This is the set of controllers used by the Kubelet
-	supportedControllers := sets.NewString("cpu", "cpuset", "memory", "hugetlb", "pids")
+	supportedControllers := sets.New("cpu", "cpuset", "memory", "hugetlb", "pids")
 	// Memoize the set of controllers that are present in the root cgroup
 	availableRootControllersOnce.Do(func() {
 		var err error
@@ -407,7 +407,7 @@ func (m *cgroupManagerImpl) maybeSetHugetlb(resourceConfig *ResourceConfig, reso
 	}

 	// For each page size enumerated, set that value.
-	pageSizes := sets.NewString()
+	pageSizes := sets.New[string]()
 	for pageSize, limit := range resourceConfig.HugePageLimit {
 		sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize)
 		if err != nil {
@@ -485,7 +485,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int {
 	cgroupFsName := m.Name(name)

 	// Get a list of processes that we need to kill
-	pidsToKill := sets.NewInt()
+	pidsToKill := sets.New[int]()
 	var pids []int
 	for _, val := range m.subsystems.MountPoints {
 		dir := path.Join(val, cgroupFsName)
@@ -526,7 +526,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int {
 			klog.V(4).InfoS("Cgroup manager encountered error scanning pids for directory", "path", dir, "err", err)
 		}
 	}
-	return pidsToKill.List()
+	return sets.List(pidsToKill)
 }

 // ReduceCPULimits reduces the cgroup's cpu shares to the lowest possible value
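The recurring pattern in the cgroup manager hunks above: the deprecated types exposed a sorted List() method, while the generic Set keeps UnsortedList() as a method and moves sorted listing to the package-level sets.List helper. A small sketch of that difference (controller names are illustrative):

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	neededControllers := sets.New("cpu", "memory", "pids")
	enabledControllers := sets.New("cpu")

	// Difference is unchanged; only the sorted-listing call moved.
	missing := neededControllers.Difference(enabledControllers)

	// Old: strings.Join(missing.List(), ", ")
	// New: sets.List returns a sorted slice for any ordered element type.
	fmt.Printf("missing controllers: %s\n", strings.Join(sets.List(missing), ", "))
}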
@@ -164,7 +164,7 @@ type NodeAllocatableConfig struct {
 	KubeReservedCgroupName   string
 	SystemReservedCgroupName string
 	ReservedSystemCPUs       cpuset.CPUSet
-	EnforceNodeAllocatable   sets.String
+	EnforceNodeAllocatable   sets.Set[string]
 	KubeReserved             v1.ResourceList
 	SystemReserved           v1.ResourceList
 	HardEvictionThresholds   []evictionapi.Threshold
@@ -161,7 +161,7 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
 		return f, nil
 	}

-	expectedCgroups := sets.NewString("cpu", "cpuacct", "cpuset", "memory")
+	expectedCgroups := sets.New("cpu", "cpuacct", "cpuset", "memory")
 	for _, mountPoint := range mountPoints {
 		if mountPoint.Type == cgroupMountType {
 			for _, opt := range mountPoint.Opts {
@@ -176,7 +176,7 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
 	}

 	if expectedCgroups.Len() > 0 {
-		return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, expectedCgroups.List())
+		return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, sets.List(expectedCgroups))
 	}

 	// Check if cpu quota is available.
@@ -35,14 +35,14 @@ const (
 )

 var (
-	alphaOptions = sets.NewString(
+	alphaOptions = sets.New[string](
 		DistributeCPUsAcrossNUMAOption,
 		AlignBySocketOption,
 	)
-	betaOptions = sets.NewString(
+	betaOptions = sets.New[string](
 		FullPCPUsOnlyOption,
 	)
-	stableOptions = sets.NewString()
+	stableOptions = sets.New[string]()
 )

 // CheckPolicyOptionAvailable verifies if the given option can be used depending on the Feature Gate Settings.
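Note the two constructor spellings in hunks like the one above: with arguments the element type is inferred, but an empty set needs an explicit type parameter because there is nothing to infer from. A short sketch of that distinction, using placeholder option names:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// With arguments, Go infers the type parameter.
	betaOptions := sets.New("full-pcpus-only", "align-by-socket")

	// Without arguments, the element type must be written out.
	stableOptions := sets.New[string]()

	fmt.Println(betaOptions.Has("full-pcpus-only"), stableOptions.Len()) // true 0
}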
@@ -62,9 +62,9 @@ func NewDevicesPerNUMA() DevicesPerNUMA {
 }

 // Devices is a function that returns all device ids for all NUMA nodes
-// and represent it as sets.String
-func (dev DevicesPerNUMA) Devices() sets.String {
-	result := sets.NewString()
+// and represent it as sets.Set[string]
+func (dev DevicesPerNUMA) Devices() sets.Set[string] {
+	result := sets.New[string]()

 	for _, devs := range dev {
 		result.Insert(devs...)
@@ -74,13 +74,13 @@ type ManagerImpl struct {
 	allDevices ResourceDeviceInstances

 	// healthyDevices contains all of the registered healthy resourceNames and their exported device IDs.
-	healthyDevices map[string]sets.String
+	healthyDevices map[string]sets.Set[string]

 	// unhealthyDevices contains all of the unhealthy devices and their exported device IDs.
-	unhealthyDevices map[string]sets.String
+	unhealthyDevices map[string]sets.Set[string]

 	// allocatedDevices contains allocated deviceIds, keyed by resourceName.
-	allocatedDevices map[string]sets.String
+	allocatedDevices map[string]sets.Set[string]

 	// podDevices contains pod to allocated device mapping.
 	podDevices *podDevices
@@ -106,7 +106,7 @@ type ManagerImpl struct {
 	// containerRunningSet identifies which container among those present in `containerMap`
 	// was reported running by the container runtime when `containerMap` was computed.
 	// Used to detect pods running across a restart
-	containerRunningSet sets.String
+	containerRunningSet sets.Set[string]
 }

 type endpointInfo struct {
@@ -117,7 +117,7 @@ type endpointInfo struct {
 type sourcesReadyStub struct{}

 // PodReusableDevices is a map by pod name of devices to reuse.
-type PodReusableDevices map[string]map[string]sets.String
+type PodReusableDevices map[string]map[string]sets.Set[string]

 func (s *sourcesReadyStub) AddSource(source string) {}
 func (s *sourcesReadyStub) AllReady() bool { return true }
@@ -143,9 +143,9 @@ func newManagerImpl(socketPath string, topology []cadvisorapi.Node, topologyAffi
 		endpoints: make(map[string]endpointInfo),

 		allDevices: NewResourceDeviceInstances(),
-		healthyDevices: make(map[string]sets.String),
-		unhealthyDevices: make(map[string]sets.String),
-		allocatedDevices: make(map[string]sets.String),
+		healthyDevices: make(map[string]sets.Set[string]),
+		unhealthyDevices: make(map[string]sets.Set[string]),
+		allocatedDevices: make(map[string]sets.Set[string]),
 		podDevices: newPodDevices(),
 		numaNodes: numaNodes,
 		topologyAffinityStore: topologyAffinityStore,
@@ -259,8 +259,8 @@ func (m *ManagerImpl) PluginListAndWatchReceiver(resourceName string, resp *plug
 func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, devices []pluginapi.Device) {
 	healthyCount := 0
 	m.mutex.Lock()
-	m.healthyDevices[resourceName] = sets.NewString()
-	m.unhealthyDevices[resourceName] = sets.NewString()
+	m.healthyDevices[resourceName] = sets.New[string]()
+	m.unhealthyDevices[resourceName] = sets.New[string]()
 	m.allDevices[resourceName] = make(map[string]pluginapi.Device)
 	for _, dev := range devices {
 		m.allDevices[resourceName][dev.ID] = dev
@@ -291,7 +291,7 @@ func (m *ManagerImpl) checkpointFile() string {
 // Start starts the Device Plugin Manager and start initialization of
 // podDevices and allocatedDevices information from checkpointed state and
 // starts device plugin registration service.
-func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.String) error {
+func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.Set[string]) error {
 	klog.V(2).InfoS("Starting Device Plugin manager")

 	m.activePods = activePods
@@ -323,7 +323,7 @@ func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
 	m.setPodPendingAdmission(pod)

 	if _, ok := m.devicesToReuse[string(pod.UID)]; !ok {
-		m.devicesToReuse[string(pod.UID)] = make(map[string]sets.String)
+		m.devicesToReuse[string(pod.UID)] = make(map[string]sets.Set[string])
 	}
 	// If pod entries to m.devicesToReuse other than the current pod exist, delete them.
 	for podUID := range m.devicesToReuse {
@@ -365,13 +365,13 @@ func (m *ManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, a

 func (m *ManagerImpl) markResourceUnhealthy(resourceName string) {
 	klog.V(2).InfoS("Mark all resources Unhealthy for resource", "resourceName", resourceName)
-	healthyDevices := sets.NewString()
+	healthyDevices := sets.New[string]()
 	if _, ok := m.healthyDevices[resourceName]; ok {
 		healthyDevices = m.healthyDevices[resourceName]
-		m.healthyDevices[resourceName] = sets.NewString()
+		m.healthyDevices[resourceName] = sets.New[string]()
 	}
 	if _, ok := m.unhealthyDevices[resourceName]; !ok {
-		m.unhealthyDevices[resourceName] = sets.NewString()
+		m.unhealthyDevices[resourceName] = sets.New[string]()
 	}
 	m.unhealthyDevices[resourceName] = m.unhealthyDevices[resourceName].Union(healthyDevices)
 }
@@ -392,7 +392,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
 	needsUpdateCheckpoint := false
 	var capacity = v1.ResourceList{}
 	var allocatable = v1.ResourceList{}
-	deletedResources := sets.NewString()
+	deletedResources := sets.New[string]()
 	m.mutex.Lock()
 	for resourceName, devices := range m.healthyDevices {
 		eI, ok := m.endpoints[resourceName]
@@ -492,8 +492,8 @@ func (m *ManagerImpl) readCheckpoint() error {
 	for resource := range registeredDevs {
 		// During start up, creates empty healthyDevices list so that the resource capacity
 		// will stay zero till the corresponding device plugin re-registers.
-		m.healthyDevices[resource] = sets.NewString()
-		m.unhealthyDevices[resource] = sets.NewString()
+		m.healthyDevices[resource] = sets.New[string]()
+		m.unhealthyDevices[resource] = sets.New[string]()
 		m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil}
 	}
 	return nil
@@ -536,15 +536,15 @@ func (m *ManagerImpl) UpdateAllocatedDevices() {
 	if len(podsToBeRemoved) <= 0 {
 		return
 	}
-	klog.V(3).InfoS("Pods to be removed", "podUIDs", podsToBeRemoved.List())
-	m.podDevices.delete(podsToBeRemoved.List())
+	klog.V(3).InfoS("Pods to be removed", "podUIDs", sets.List(podsToBeRemoved))
+	m.podDevices.delete(sets.List(podsToBeRemoved))
 	// Regenerated allocatedDevices after we update pod allocation information.
 	m.allocatedDevices = m.podDevices.devices()
 }

 // Returns list of device Ids we need to allocate with Allocate rpc call.
 // Returns empty list in case we don't need to issue the Allocate rpc call.
-func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.String) (sets.String, error) {
+func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.Set[string]) (sets.Set[string], error) {
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
 	needed := required
@@ -552,7 +552,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 	// This can happen if a container restarts for example.
 	devices := m.podDevices.containerDevices(podUID, contName, resource)
 	if devices != nil {
-		klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", string(podUID), "devices", devices.List())
+		klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", string(podUID), "devices", sets.List(devices))
 		needed = needed - devices.Len()
 		// A pod's resource is not expected to change once admitted by the API server,
 		// so just fail loudly here. We can revisit this part if this no longer holds.
@@ -610,11 +610,11 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi

 	// Declare the list of allocated devices.
 	// This will be populated and returned below.
-	allocated := sets.NewString()
+	allocated := sets.New[string]()

 	// Create a closure to help with device allocation
 	// Returns 'true' once no more devices need to be allocated.
-	allocateRemainingFrom := func(devices sets.String) bool {
+	allocateRemainingFrom := func(devices sets.Set[string]) bool {
 		for device := range devices.Difference(allocated) {
 			m.allocatedDevices[resource].Insert(device)
 			allocated.Insert(device)
@@ -628,7 +628,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi

 	// Needs to allocate additional devices.
 	if m.allocatedDevices[resource] == nil {
-		m.allocatedDevices[resource] = sets.NewString()
+		m.allocatedDevices[resource] = sets.New[string]()
 	}

 	// Allocates from reusableDevices list first.
@@ -697,22 +697,22 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 	return nil, fmt.Errorf("unexpectedly allocated less resources than required. Requested: %d, Got: %d", required, required-needed)
 }

-func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, available sets.String) (sets.String, sets.String, sets.String) {
+func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, available sets.Set[string]) (sets.Set[string], sets.Set[string], sets.Set[string]) {
 	// If alignment information is not available, just pass the available list back.
 	hint := m.topologyAffinityStore.GetAffinity(podUID, contName)
 	if !m.deviceHasTopologyAlignment(resource) || hint.NUMANodeAffinity == nil {
-		return sets.NewString(), sets.NewString(), available
+		return sets.New[string](), sets.New[string](), available
 	}

 	// Build a map of NUMA Nodes to the devices associated with them. A
 	// device may be associated to multiple NUMA nodes at the same time. If an
 	// available device does not have any NUMA Nodes associated with it, add it
 	// to a list of NUMA Nodes for the fake NUMANode -1.
-	perNodeDevices := make(map[int]sets.String)
+	perNodeDevices := make(map[int]sets.Set[string])
 	for d := range available {
 		if m.allDevices[resource][d].Topology == nil || len(m.allDevices[resource][d].Topology.Nodes) == 0 {
 			if _, ok := perNodeDevices[nodeWithoutTopology]; !ok {
-				perNodeDevices[nodeWithoutTopology] = sets.NewString()
+				perNodeDevices[nodeWithoutTopology] = sets.New[string]()
 			}
 			perNodeDevices[nodeWithoutTopology].Insert(d)
 			continue
@@ -720,7 +720,7 @@ func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, availa

 		for _, node := range m.allDevices[resource][d].Topology.Nodes {
 			if _, ok := perNodeDevices[int(node.ID)]; !ok {
-				perNodeDevices[int(node.ID)] = sets.NewString()
+				perNodeDevices[int(node.ID)] = sets.New[string]()
 			}
 			perNodeDevices[int(node.ID)].Insert(d)
 		}
@@ -791,14 +791,14 @@ func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, availa
 	}

 	// Return all three lists containing the full set of devices across them.
-	return sets.NewString(fromAffinity...), sets.NewString(notFromAffinity...), sets.NewString(withoutTopology...)
+	return sets.New[string](fromAffinity...), sets.New[string](notFromAffinity...), sets.New[string](withoutTopology...)
 }

 // allocateContainerResources attempts to allocate all of required device
 // plugin resources for the input container, issues an Allocate rpc request
 // for each new device resource requirement, processes their AllocateResponses,
 // and updates the cached containerDevices on success.
-func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.String) error {
+func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.Set[string]) error {
 	podUID := string(pod.UID)
 	contName := container.Name
 	allocatedDevicesUpdated := false
@@ -981,7 +981,7 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s

 // callGetPreferredAllocationIfAvailable issues GetPreferredAllocation grpc
 // call for device plugin resource with GetPreferredAllocationAvailable option set.
-func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, resource string, available, mustInclude sets.String, size int) (sets.String, error) {
+func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, resource string, available, mustInclude sets.Set[string], size int) (sets.Set[string], error) {
 	eI, ok := m.endpoints[resource]
 	if !ok {
 		return nil, fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource)
@@ -1000,9 +1000,9 @@ func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, re
 		return nil, fmt.Errorf("device plugin GetPreferredAllocation rpc failed with err: %v", err)
 	}
 	if resp != nil && len(resp.ContainerResponses) > 0 {
-		return sets.NewString(resp.ContainerResponses[0].DeviceIDs...), nil
+		return sets.New[string](resp.ContainerResponses[0].DeviceIDs...), nil
 	}
-	return sets.NewString(), nil
+	return sets.New[string](), nil
 }

 // sanitizeNodeAllocatable scans through allocatedDevices in the device manager
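The device manager keeps several map[string]sets.Set[string] indexes (healthy, unhealthy and allocated devices per resource), and set algebra such as Union and Difference is unchanged by the migration. A rough sketch of the markResourceUnhealthy-style move between maps, with hypothetical resource and device names:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	healthy := map[string]sets.Set[string]{
		"example.com/gpu": sets.New("dev1", "dev2"),
	}
	unhealthy := map[string]sets.Set[string]{}

	// Move every healthy device of a resource into the unhealthy set,
	// mirroring what happens when a plugin endpoint disappears.
	res := "example.com/gpu"
	moved := healthy[res]
	healthy[res] = sets.New[string]()
	if _, ok := unhealthy[res]; !ok {
		unhealthy[res] = sets.New[string]()
	}
	unhealthy[res] = unhealthy[res].Union(moved)

	fmt.Println(sets.List(unhealthy[res])) // [dev1 dev2]
}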
@@ -287,7 +287,7 @@ func setupDeviceManager(t *testing.T, devs []*pluginapi.Device, callback monitor

 	// test steady state, initialization where sourcesReady, containerMap and containerRunningSet
 	// are relevant will be tested with a different flow
-	err = w.Start(activePods, &sourcesReadyStub{}, containermap.NewContainerMap(), sets.NewString())
+	err = w.Start(activePods, &sourcesReadyStub{}, containermap.NewContainerMap(), sets.New[string]())
 	require.NoError(t, err)

 	return w, updateChan
@@ -312,6 +312,7 @@ func setupPluginManager(t *testing.T, pluginSocketName string, m Manager) plugin
 }

 func runPluginManager(pluginManager pluginmanager.PluginManager) {
+	// FIXME: Replace sets.String with sets.Set[string]
 	sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
 	go pluginManager.Run(sourcesReady, wait.NeverStop)
 }
@@ -459,8 +460,8 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
 	// properly rejected instead of being incorrectly started.
 	err = testManager.writeCheckpoint()
 	as.Nil(err)
-	testManager.healthyDevices = make(map[string]sets.String)
-	testManager.unhealthyDevices = make(map[string]sets.String)
+	testManager.healthyDevices = make(map[string]sets.Set[string])
+	testManager.unhealthyDevices = make(map[string]sets.Set[string])
 	err = testManager.readCheckpoint()
 	as.Nil(err)
 	as.Equal(1, len(testManager.endpoints))
@@ -673,9 +674,9 @@ func TestCheckpoint(t *testing.T) {
 	as.Nil(err)
 	testManager := &ManagerImpl{
 		endpoints: make(map[string]endpointInfo),
-		healthyDevices: make(map[string]sets.String),
-		unhealthyDevices: make(map[string]sets.String),
-		allocatedDevices: make(map[string]sets.String),
+		healthyDevices: make(map[string]sets.Set[string]),
+		unhealthyDevices: make(map[string]sets.Set[string]),
+		allocatedDevices: make(map[string]sets.Set[string]),
 		podDevices: newPodDevices(),
 		checkpointManager: ckm,
 	}
@@ -718,16 +719,16 @@ func TestCheckpoint(t *testing.T) {
 		),
 	)

-	testManager.healthyDevices[resourceName1] = sets.NewString()
+	testManager.healthyDevices[resourceName1] = sets.New[string]()
 	testManager.healthyDevices[resourceName1].Insert("dev1")
 	testManager.healthyDevices[resourceName1].Insert("dev2")
 	testManager.healthyDevices[resourceName1].Insert("dev3")
 	testManager.healthyDevices[resourceName1].Insert("dev4")
 	testManager.healthyDevices[resourceName1].Insert("dev5")
-	testManager.healthyDevices[resourceName2] = sets.NewString()
+	testManager.healthyDevices[resourceName2] = sets.New[string]()
 	testManager.healthyDevices[resourceName2].Insert("dev1")
 	testManager.healthyDevices[resourceName2].Insert("dev2")
-	testManager.healthyDevices[resourceName3] = sets.NewString()
+	testManager.healthyDevices[resourceName3] = sets.New[string]()
 	testManager.healthyDevices[resourceName3].Insert("dev5")

 	expectedPodDevices := testManager.podDevices
@@ -827,9 +828,9 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
 		return nil, err
 	}
 	m := &ManagerImpl{
-		healthyDevices: make(map[string]sets.String),
-		unhealthyDevices: make(map[string]sets.String),
-		allocatedDevices: make(map[string]sets.String),
+		healthyDevices: make(map[string]sets.Set[string]),
+		unhealthyDevices: make(map[string]sets.Set[string]),
+		allocatedDevices: make(map[string]sets.Set[string]),
 		endpoints: make(map[string]endpointInfo),
 		podDevices: newPodDevices(),
 		devicesToReuse: make(PodReusableDevices),
@@ -846,7 +847,7 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
 	}

 	for _, res := range testRes {
-		testManager.healthyDevices[res.resourceName] = sets.NewString(res.devs.Devices().UnsortedList()...)
+		testManager.healthyDevices[res.resourceName] = sets.New[string](res.devs.Devices().UnsortedList()...)
 		if res.resourceName == "domain1.com/resource1" {
 			testManager.endpoints[res.resourceName] = endpointInfo{
 				e: &MockEndpoint{allocateFunc: allocateStubFunc()},
@@ -953,22 +954,22 @@ func TestFilterByAffinity(t *testing.T) {
 	}

 	testCases := []struct {
-		available sets.String
-		fromAffinityExpected sets.String
-		notFromAffinityExpected sets.String
-		withoutTopologyExpected sets.String
+		available sets.Set[string]
+		fromAffinityExpected sets.Set[string]
+		notFromAffinityExpected sets.Set[string]
+		withoutTopologyExpected sets.Set[string]
 	}{
 		{
-			available: sets.NewString("dev1", "dev2"),
-			fromAffinityExpected: sets.NewString("dev2"),
-			notFromAffinityExpected: sets.NewString("dev1"),
-			withoutTopologyExpected: sets.NewString(),
+			available: sets.New[string]("dev1", "dev2"),
+			fromAffinityExpected: sets.New[string]("dev2"),
+			notFromAffinityExpected: sets.New[string]("dev1"),
+			withoutTopologyExpected: sets.New[string](),
 		},
 		{
-			available: sets.NewString("dev1", "dev2", "dev3", "dev4"),
-			fromAffinityExpected: sets.NewString("dev2", "dev3", "dev4"),
-			notFromAffinityExpected: sets.NewString("dev1"),
-			withoutTopologyExpected: sets.NewString(),
+			available: sets.New[string]("dev1", "dev2", "dev3", "dev4"),
+			fromAffinityExpected: sets.New[string]("dev2", "dev3", "dev4"),
+			notFromAffinityExpected: sets.New[string]("dev1"),
+			withoutTopologyExpected: sets.New[string](),
 		},
 	}

@@ -1087,9 +1088,9 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {

 	testManager := &ManagerImpl{
 		endpoints: make(map[string]endpointInfo),
-		healthyDevices: make(map[string]sets.String),
-		unhealthyDevices: make(map[string]sets.String),
-		allocatedDevices: make(map[string]sets.String),
+		healthyDevices: make(map[string]sets.Set[string]),
+		unhealthyDevices: make(map[string]sets.Set[string]),
+		allocatedDevices: make(map[string]sets.Set[string]),
 		podDevices: newPodDevices(),
 		activePods: func() []*v1.Pod { return []*v1.Pod{} },
 		sourcesReady: &sourcesReadyStub{},
@@ -1121,8 +1122,8 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 	// no healthy devices for resourceName1 and devices corresponding to
 	// resource2 are intentionally omitted to simulate that the resource
 	// hasn't been registered.
-	testManager.healthyDevices[resourceName1] = sets.NewString()
-	testManager.healthyDevices[resourceName3] = sets.NewString()
+	testManager.healthyDevices[resourceName1] = sets.New[string]()
+	testManager.healthyDevices[resourceName3] = sets.New[string]()
 	// dev5 is no longer in the list of healthy devices
 	testManager.healthyDevices[resourceName3].Insert("dev7")
 	testManager.healthyDevices[resourceName3].Insert("dev8")
@@ -1133,8 +1134,8 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 		contName string
 		resource string
 		required int
-		reusableDevices sets.String
-		expectedAllocatedDevices sets.String
+		reusableDevices sets.Set[string]
+		expectedAllocatedDevices sets.Set[string]
 		expErr error
 	}{
 		{
@@ -1143,7 +1144,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 			contName: "con1",
 			resource: resourceName1,
 			required: 2,
-			reusableDevices: sets.NewString(),
+			reusableDevices: sets.New[string](),
 			expectedAllocatedDevices: nil,
 			expErr: fmt.Errorf("no healthy devices present; cannot allocate unhealthy devices %s", resourceName1),
 		},
@@ -1153,7 +1154,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 			contName: "con2",
 			resource: resourceName2,
 			required: 1,
-			reusableDevices: sets.NewString(),
+			reusableDevices: sets.New[string](),
 			expectedAllocatedDevices: nil,
 			expErr: fmt.Errorf("cannot allocate unregistered device %s", resourceName2),
 		},
@@ -1163,7 +1164,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 			contName: "con3",
 			resource: resourceName3,
 			required: 1,
-			reusableDevices: sets.NewString(),
+			reusableDevices: sets.New[string](),
 			expectedAllocatedDevices: nil,
 			expErr: fmt.Errorf("previously allocated devices are no longer healthy; cannot allocate unhealthy devices %s", resourceName3),
 		},
@@ -1366,8 +1367,8 @@ func TestUpdatePluginResources(t *testing.T) {
 	ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
 	as.Nil(err)
 	m := &ManagerImpl{
-		allocatedDevices: make(map[string]sets.String),
-		healthyDevices: make(map[string]sets.String),
+		allocatedDevices: make(map[string]sets.Set[string]),
+		healthyDevices: make(map[string]sets.Set[string]),
 		podDevices: newPodDevices(),
 		checkpointManager: ckm,
 	}
@@ -1378,9 +1379,9 @@ func TestUpdatePluginResources(t *testing.T) {
 	testManager.podDevices.devs[string(pod.UID)] = make(containerDevices)

 	// require one of resource1 and one of resource2
-	testManager.allocatedDevices[resourceName1] = sets.NewString()
+	testManager.allocatedDevices[resourceName1] = sets.New[string]()
 	testManager.allocatedDevices[resourceName1].Insert(devID1)
-	testManager.allocatedDevices[resourceName2] = sets.NewString()
+	testManager.allocatedDevices[resourceName2] = sets.New[string]()
 	testManager.allocatedDevices[resourceName2].Insert(devID2)

 	cachedNode := &v1.Node{
@@ -1486,9 +1487,9 @@ func TestResetExtendedResource(t *testing.T) {
 	as.Nil(err)
 	testManager := &ManagerImpl{
 		endpoints: make(map[string]endpointInfo),
-		healthyDevices: make(map[string]sets.String),
-		unhealthyDevices: make(map[string]sets.String),
-		allocatedDevices: make(map[string]sets.String),
+		healthyDevices: make(map[string]sets.Set[string]),
+		unhealthyDevices: make(map[string]sets.Set[string]),
+		allocatedDevices: make(map[string]sets.Set[string]),
 		podDevices: newPodDevices(),
 		checkpointManager: ckm,
 	}
@@ -1502,7 +1503,7 @@ func TestResetExtendedResource(t *testing.T) {
 		),
 	)

-	testManager.healthyDevices[extendedResourceName] = sets.NewString()
+	testManager.healthyDevices[extendedResourceName] = sets.New[string]()
 	testManager.healthyDevices[extendedResourceName].Insert("dev1")
 	// checkpoint is present, indicating node hasn't been recreated
 	err = testManager.writeCheckpoint()
@@ -52,10 +52,10 @@ func newPodDevices() *podDevices {
 	return &podDevices{devs: make(map[string]containerDevices)}
 }

-func (pdev *podDevices) pods() sets.String {
+func (pdev *podDevices) pods() sets.Set[string] {
 	pdev.RLock()
 	defer pdev.RUnlock()
-	ret := sets.NewString()
+	ret := sets.New[string]()
 	for k := range pdev.devs {
 		ret.Insert(k)
 	}
@@ -100,11 +100,11 @@ func (pdev *podDevices) delete(pods []string) {

 // Returns list of device Ids allocated to the given pod for the given resource.
 // Returns nil if we don't have cached state for the given <podUID, resource>.
-func (pdev *podDevices) podDevices(podUID, resource string) sets.String {
+func (pdev *podDevices) podDevices(podUID, resource string) sets.Set[string] {
 	pdev.RLock()
 	defer pdev.RUnlock()

-	ret := sets.NewString()
+	ret := sets.New[string]()
 	for contName := range pdev.devs[podUID] {
 		ret = ret.Union(pdev.containerDevices(podUID, contName, resource))
 	}
@@ -113,7 +113,7 @@ func (pdev *podDevices) podDevices(podUID, resource string) sets.String {

 // Returns list of device Ids allocated to the given container for the given resource.
 // Returns nil if we don't have cached state for the given <podUID, contName, resource>.
-func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets.String {
+func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets.Set[string] {
 	pdev.RLock()
 	defer pdev.RUnlock()
 	if _, podExists := pdev.devs[podUID]; !podExists {
@@ -130,7 +130,7 @@ func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets
 }

 // Populates allocatedResources with the device resources allocated to the specified <podUID, contName>.
-func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) {
+func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) {
 	pdev.RLock()
 	defer pdev.RUnlock()
 	containers, exists := pdev.devs[podUID]
@@ -147,7 +147,7 @@ func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string,
 }

 // Removes the device resources allocated to the specified <podUID, contName> from allocatedResources.
-func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) {
+func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) {
 	pdev.RLock()
 	defer pdev.RUnlock()
 	containers, exists := pdev.devs[podUID]
@@ -164,15 +164,15 @@ func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName strin
 }

 // Returns all of devices allocated to the pods being tracked, keyed by resourceName.
-func (pdev *podDevices) devices() map[string]sets.String {
-	ret := make(map[string]sets.String)
+func (pdev *podDevices) devices() map[string]sets.Set[string] {
+	ret := make(map[string]sets.Set[string])
 	pdev.RLock()
 	defer pdev.RUnlock()
 	for _, containerDevices := range pdev.devs {
 		for _, resources := range containerDevices {
 			for resource, devices := range resources {
 				if _, exists := ret[resource]; !exists {
-					ret[resource] = sets.NewString()
+					ret[resource] = sets.New[string]()
 				}
 				if devices.allocResp != nil {
 					ret[resource] = ret[resource].Union(devices.deviceIds.Devices())
@@ -464,9 +464,9 @@ func (rdev ResourceDeviceInstances) Clone() ResourceDeviceInstances {
 	return clone
 }

-// Filter takes a condition set expressed as map[string]sets.String and returns a new
+// Filter takes a condition set expressed as map[string]sets.Set[string] and returns a new
 // ResourceDeviceInstances with only the devices matching the condition set.
-func (rdev ResourceDeviceInstances) Filter(cond map[string]sets.String) ResourceDeviceInstances {
+func (rdev ResourceDeviceInstances) Filter(cond map[string]sets.Set[string]) ResourceDeviceInstances {
 	filtered := NewResourceDeviceInstances()
 	for resourceName, filterIDs := range cond {
 		if _, exists := rdev[resourceName]; !exists {
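Filter receives a map of per-resource device-ID sets and keeps only the matching instances; with the generic type, membership checks remain a plain Has call. A rough sketch of the filtering idea on a simplified map (not the real ResourceDeviceInstances type), with made-up resource and device IDs:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// filterIDs keeps only the device IDs of each resource that appear in cond.
func filterIDs(devs map[string][]string, cond map[string]sets.Set[string]) map[string][]string {
	out := map[string][]string{}
	for resource, ids := range devs {
		want, ok := cond[resource]
		if !ok {
			continue
		}
		for _, id := range ids {
			if want.Has(id) {
				out[resource] = append(out[resource], id)
			}
		}
	}
	return out
}

func main() {
	devs := map[string][]string{"foo": {"dev-foo1", "dev-foo2", "dev-foo3"}}
	cond := map[string]sets.Set[string]{"foo": sets.New("dev-foo1", "dev-foo3")}
	fmt.Println(filterIDs(devs, cond)) // map[foo:[dev-foo1 dev-foo3]]
}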
@@ -65,7 +65,7 @@ func TestGetContainerDevices(t *testing.T) {

 func TestResourceDeviceInstanceFilter(t *testing.T) {
 	var expected string
-	var cond map[string]sets.String
+	var cond map[string]sets.Set[string]
 	var resp ResourceDeviceInstances
 	devs := ResourceDeviceInstances{
 		"foo": DeviceInstances{
@@ -103,40 +103,40 @@ func TestResourceDeviceInstanceFilter(t *testing.T) {
 		},
 	}

-	resp = devs.Filter(map[string]sets.String{})
+	resp = devs.Filter(map[string]sets.Set[string]{})
 	expected = `{}`
 	expectResourceDeviceInstances(t, resp, expected)

-	cond = map[string]sets.String{
-		"foo": sets.NewString("dev-foo1", "dev-foo2"),
-		"bar": sets.NewString("dev-bar1"),
+	cond = map[string]sets.Set[string]{
+		"foo": sets.New[string]("dev-foo1", "dev-foo2"),
+		"bar": sets.New[string]("dev-bar1"),
 	}
 	resp = devs.Filter(cond)
 	expected = `{"bar":{"dev-bar1":{"ID":"bar1"}},"foo":{"dev-foo1":{"ID":"foo1"},"dev-foo2":{"ID":"foo2"}}}`
 	expectResourceDeviceInstances(t, resp, expected)

-	cond = map[string]sets.String{
-		"foo": sets.NewString("dev-foo1", "dev-foo2", "dev-foo3"),
-		"bar": sets.NewString("dev-bar1", "dev-bar2", "dev-bar3"),
-		"baz": sets.NewString("dev-baz1", "dev-baz2", "dev-baz3"),
+	cond = map[string]sets.Set[string]{
+		"foo": sets.New[string]("dev-foo1", "dev-foo2", "dev-foo3"),
+		"bar": sets.New[string]("dev-bar1", "dev-bar2", "dev-bar3"),
+		"baz": sets.New[string]("dev-baz1", "dev-baz2", "dev-baz3"),
 	}
 	resp = devs.Filter(cond)
 	expected = `{"bar":{"dev-bar1":{"ID":"bar1"},"dev-bar2":{"ID":"bar2"},"dev-bar3":{"ID":"bar3"}},"baz":{"dev-baz1":{"ID":"baz1"},"dev-baz2":{"ID":"baz2"},"dev-baz3":{"ID":"baz3"}},"foo":{"dev-foo1":{"ID":"foo1"},"dev-foo2":{"ID":"foo2"},"dev-foo3":{"ID":"foo3"}}}`
 	expectResourceDeviceInstances(t, resp, expected)

-	cond = map[string]sets.String{
-		"foo": sets.NewString("dev-foo1", "dev-foo2", "dev-foo3", "dev-foo4"),
-		"bar": sets.NewString("dev-bar1", "dev-bar2", "dev-bar3", "dev-bar4"),
-		"baz": sets.NewString("dev-baz1", "dev-baz2", "dev-baz3", "dev-bar4"),
+	cond = map[string]sets.Set[string]{
+		"foo": sets.New[string]("dev-foo1", "dev-foo2", "dev-foo3", "dev-foo4"),
+		"bar": sets.New[string]("dev-bar1", "dev-bar2", "dev-bar3", "dev-bar4"),
+		"baz": sets.New[string]("dev-baz1", "dev-baz2", "dev-baz3", "dev-bar4"),
 	}
 	resp = devs.Filter(cond)
 	expected = `{"bar":{"dev-bar1":{"ID":"bar1"},"dev-bar2":{"ID":"bar2"},"dev-bar3":{"ID":"bar3"}},"baz":{"dev-baz1":{"ID":"baz1"},"dev-baz2":{"ID":"baz2"},"dev-baz3":{"ID":"baz3"}},"foo":{"dev-foo1":{"ID":"foo1"},"dev-foo2":{"ID":"foo2"},"dev-foo3":{"ID":"foo3"}}}`
 	expectResourceDeviceInstances(t, resp, expected)

-	cond = map[string]sets.String{
-		"foo": sets.NewString("dev-foo1", "dev-foo4", "dev-foo7"),
-		"bar": sets.NewString("dev-bar1", "dev-bar4", "dev-bar7"),
-		"baz": sets.NewString("dev-baz1", "dev-baz4", "dev-baz7"),
+	cond = map[string]sets.Set[string]{
+		"foo": sets.New[string]("dev-foo1", "dev-foo4", "dev-foo7"),
+		"bar": sets.New[string]("dev-bar1", "dev-bar4", "dev-bar7"),
+		"baz": sets.New[string]("dev-baz1", "dev-baz4", "dev-baz7"),
 	}
 	resp = devs.Filter(cond)
 	expected = `{"bar":{"dev-bar1":{"ID":"bar1"}},"baz":{"dev-baz1":{"ID":"baz1"}},"foo":{"dev-foo1":{"ID":"foo1"}}}`
@@ -63,7 +63,7 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
 			continue
 		}
 		klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resource", resource, "pod", klog.KObj(pod), "containerName", container.Name)
-		deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.String{}, requested)
+		deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested)
 		continue
 	}

@@ -118,7 +118,7 @@ func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana
 			continue
 		}
 		klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resource", resource, "pod", klog.KObj(pod))
-		deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.String{}, requested)
+		deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested)
 		continue
 	}

@@ -132,7 +132,7 @@ func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana

 		// Generate TopologyHints for this resource given the current
 		// request size and the list of available devices.
-		deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, sets.String{}, requested)
+		deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, sets.Set[string]{}, requested)
 	}

 	return deviceHints
@@ -148,12 +148,12 @@ func (m *ManagerImpl) deviceHasTopologyAlignment(resource string) bool {
 	return false
 }

-func (m *ManagerImpl) getAvailableDevices(resource string) sets.String {
+func (m *ManagerImpl) getAvailableDevices(resource string) sets.Set[string] {
 	// Strip all devices in use from the list of healthy ones.
 	return m.healthyDevices[resource].Difference(m.allocatedDevices[resource])
 }

-func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available sets.String, reusable sets.String, request int) []topologymanager.TopologyHint {
+func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available sets.Set[string], reusable sets.Set[string], request int) []topologymanager.TopologyHint {
 	// Initialize minAffinitySize to include all NUMA Nodes
 	minAffinitySize := len(m.numaNodes)

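The hunks above pass sets.Set[string]{} where the old code passed sets.String{}. Since the generic Set is still a map type under the hood, the empty composite literal is a non-nil, usable set. A tiny sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Set[T] is a map[T]Empty, so the empty literal is non-nil
	// and safe to pass around or insert into.
	reusable := sets.Set[string]{}
	reusable.Insert("dev1")

	fmt.Println(reusable.Len(), reusable.Has("dev1")) // 1 true
}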
@@ -61,8 +61,8 @@ func TestGetTopologyHints(t *testing.T) {
 	for _, tc := range tcases {
 		m := ManagerImpl{
 			allDevices: NewResourceDeviceInstances(),
-			healthyDevices: make(map[string]sets.String),
-			allocatedDevices: make(map[string]sets.String),
+			healthyDevices: make(map[string]sets.Set[string]),
+			allocatedDevices: make(map[string]sets.Set[string]),
 			podDevices: newPodDevices(),
 			sourcesReady: &sourcesReadyStub{},
 			activePods: func() []*v1.Pod { return []*v1.Pod{tc.pod} },
@@ -71,7 +71,7 @@ func TestGetTopologyHints(t *testing.T) {

 		for r := range tc.devices {
 			m.allDevices[r] = make(DeviceInstances)
-			m.healthyDevices[r] = sets.NewString()
+			m.healthyDevices[r] = sets.New[string]()

 			for _, d := range tc.devices[r] {
 				m.allDevices[r][d.ID] = d
@@ -84,7 +84,7 @@ func TestGetTopologyHints(t *testing.T) {
 				for r, devices := range tc.allocatedDevices[p][c] {
 					m.podDevices.insert(p, c, r, constructDevices(devices), nil)

-					m.allocatedDevices[r] = sets.NewString()
+					m.allocatedDevices[r] = sets.New[string]()
 					for _, d := range devices {
 						m.allocatedDevices[r].Insert(d)
 					}
@@ -414,8 +414,8 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 	for _, tc := range tcases {
 		m := ManagerImpl{
 			allDevices: NewResourceDeviceInstances(),
-			healthyDevices: make(map[string]sets.String),
-			allocatedDevices: make(map[string]sets.String),
+			healthyDevices: make(map[string]sets.Set[string]),
+			allocatedDevices: make(map[string]sets.Set[string]),
 			endpoints: make(map[string]endpointInfo),
 			podDevices: newPodDevices(),
 			sourcesReady: &sourcesReadyStub{},
@@ -424,7 +424,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 		}

 		m.allDevices[tc.resource] = make(DeviceInstances)
-		m.healthyDevices[tc.resource] = sets.NewString()
+		m.healthyDevices[tc.resource] = sets.New[string]()
 		m.endpoints[tc.resource] = endpointInfo{}

 		for _, d := range tc.devices {
@@ -441,7 +441,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 			}
 		}

-		allocated, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.NewString())
+		allocated, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.New[string]())
 		if err != nil {
 			t.Errorf("Unexpected error: %v", err)
 			continue
@@ -603,8 +603,8 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 	for _, tc := range tcases {
 		m := ManagerImpl{
 			allDevices: NewResourceDeviceInstances(),
-			healthyDevices: make(map[string]sets.String),
-			allocatedDevices: make(map[string]sets.String),
+			healthyDevices: make(map[string]sets.Set[string]),
+			allocatedDevices: make(map[string]sets.Set[string]),
 			endpoints: make(map[string]endpointInfo),
 			podDevices: newPodDevices(),
 			sourcesReady: &sourcesReadyStub{},
@@ -613,13 +613,13 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 		}

 		m.allDevices[tc.resource] = make(DeviceInstances)
-		m.healthyDevices[tc.resource] = sets.NewString()
+		m.healthyDevices[tc.resource] = sets.New[string]()
 		for _, d := range tc.allDevices {
 			m.allDevices[tc.resource][d.ID] = d
 			m.healthyDevices[tc.resource].Insert(d.ID)
 		}

-		m.allocatedDevices[tc.resource] = sets.NewString()
+		m.allocatedDevices[tc.resource] = sets.New[string]()
 		for _, d := range tc.allocatedDevices {
 			m.allocatedDevices[tc.resource].Insert(d)
 		}
@@ -639,17 +639,17 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 			opts: &pluginapi.DevicePluginOptions{GetPreferredAllocationAvailable: true},
 		}

-		_, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.NewString(tc.reusableDevices...))
+		_, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.New[string](tc.reusableDevices...))
 		if err != nil {
 			t.Errorf("Unexpected error: %v", err)
 			continue
 		}

-		if !sets.NewString(actualAvailable...).Equal(sets.NewString(tc.expectedAvailable...)) {
+		if !sets.New[string](actualAvailable...).Equal(sets.New[string](tc.expectedAvailable...)) {
 			t.Errorf("%v. expected available: %v but got: %v", tc.description, tc.expectedAvailable, actualAvailable)
 		}

-		if !sets.NewString(actualAvailable...).Equal(sets.NewString(tc.expectedAvailable...)) {
+		if !sets.New[string](actualAvailable...).Equal(sets.New[string](tc.expectedAvailable...)) {
 			t.Errorf("%v. expected mustInclude: %v but got: %v", tc.description, tc.expectedMustInclude, actualMustInclude)
 		}

@@ -903,11 +903,11 @@ func TestGetPodDeviceRequest(t *testing.T) {

 	for _, tc := range tcases {
 		m := ManagerImpl{
-			healthyDevices: make(map[string]sets.String),
+			healthyDevices: make(map[string]sets.Set[string]),
 		}

 		for _, res := range tc.registeredDevices {
-			m.healthyDevices[res] = sets.NewString()
+			m.healthyDevices[res] = sets.New[string]()
 		}

 		accumulatedResourceRequests := m.getPodDeviceRequest(tc.pod)
@@ -925,8 +925,8 @@ func TestGetPodTopologyHints(t *testing.T) {
 	for _, tc := range tcases {
 		m := ManagerImpl{
 			allDevices: NewResourceDeviceInstances(),
-			healthyDevices: make(map[string]sets.String),
-			allocatedDevices: make(map[string]sets.String),
+			healthyDevices: make(map[string]sets.Set[string]),
+			allocatedDevices: make(map[string]sets.Set[string]),
 			podDevices: newPodDevices(),
 			sourcesReady: &sourcesReadyStub{},
 			activePods: func() []*v1.Pod { return []*v1.Pod{tc.pod, {ObjectMeta: metav1.ObjectMeta{UID: "fakeOtherPod"}}} },
@@ -935,7 +935,7 @@ func TestGetPodTopologyHints(t *testing.T) {

 		for r := range tc.devices {
 			m.allDevices[r] = make(DeviceInstances)
-			m.healthyDevices[r] = sets.NewString()
+			m.healthyDevices[r] = sets.New[string]()

 			for _, d := range tc.devices[r] {
 				//add `pluginapi.Device` with Topology
@@ -949,7 +949,7 @@ func TestGetPodTopologyHints(t *testing.T) {
 				for r, devices := range tc.allocatedDevices[p][c] {
 					m.podDevices.insert(p, c, r, constructDevices(devices), nil)

-					m.allocatedDevices[r] = sets.NewString()
+					m.allocatedDevices[r] = sets.New[string]()
 					for _, d := range devices {
 						m.allocatedDevices[r].Insert(d)
 					}
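In the updated tests, order-insensitive assertions build a set from each slice and compare with Equal, exactly as before, just through the generic constructor. A brief sketch with made-up values:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	actual := []string{"dev2", "dev1"}
	expected := []string{"dev1", "dev2"}

	// Order-insensitive comparison, as used in the updated tests.
	if !sets.New(actual...).Equal(sets.New(expected...)) {
		fmt.Println("mismatch")
		return
	}
	fmt.Println("sets match")
}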
@@ -33,7 +33,7 @@ import (
 // Manager manages all the Device Plugins running on a node.
 type Manager interface {
 	// Start starts device plugin registration service.
-	Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.String) error
+	Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.Set[string]) error

 	// Allocate configures and assigns devices to a container in a pod. From
 	// the requested device resources, Allocate will communicate with the
@@ -52,14 +52,14 @@ func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.Res
 	return ret
 }

-func buildContainerMapAndRunningSetFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) (containermap.ContainerMap, sets.String) {
+func buildContainerMapAndRunningSetFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) (containermap.ContainerMap, sets.Set[string]) {
 	podSandboxMap := make(map[string]string)
 	podSandboxList, _ := runtimeService.ListPodSandbox(ctx, nil)
 	for _, p := range podSandboxList {
 		podSandboxMap[p.Id] = p.Metadata.Uid
 	}

-	runningSet := sets.NewString()
+	runningSet := sets.New[string]()
 	containerMap := containermap.NewContainerMap()
 	containerList, _ := runtimeService.ListContainers(ctx, nil)
 	for _, c := range containerList {
@@ -23,7 +23,8 @@ import (
 	"strconv"
 	"strings"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 )

@@ -39,7 +40,7 @@ func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, contain
 	numaNodes := i.memoryManager.GetMemoryNUMANodes(pod, container)
 	if numaNodes.Len() > 0 {
 		var affinity []string
-		for _, numaNode := range numaNodes.List() {
+		for _, numaNode := range sets.List(numaNodes) {
 			affinity = append(affinity, strconv.Itoa(numaNode))
 		}
 		containerConfig.Linux.Resources.CpusetMems = strings.Join(affinity, ",")
@@ -50,7 +50,7 @@ func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, contain
 	klog.InfoS("Add container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID)
 }

-func (m *fakeManager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int {
+func (m *fakeManager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] {
 	klog.InfoS("Get MemoryNUMANodes", "pod", klog.KObj(pod), "containerName", container.Name)
 	return nil
 }
@@ -83,7 +83,7 @@ type Manager interface {
 	GetPodTopologyHints(*v1.Pod) map[string][]topologymanager.TopologyHint

 	// GetMemoryNUMANodes provides NUMA nodes that are used to allocate the container memory
-	GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int
+	GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int]

 	// GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
 	GetAllocatableMemory() []state.Block
@@ -213,9 +213,9 @@ func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID
 }

 // GetMemoryNUMANodes provides NUMA nodes that used to allocate the container memory
-func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int {
+func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] {
 	// Get NUMA node affinity of blocks assigned to the container during Allocate()
-	numaNodes := sets.NewInt()
+	numaNodes := sets.New[int]()
 	for _, block := range m.state.GetMemoryBlocks(string(pod.UID), container.Name) {
 		for _, nodeID := range block.NUMAAffinity {
 			// avoid nodes duplication when hugepages and memory blocks pinned to the same NUMA node
@@ -30,11 +30,11 @@ const (
 )

 var (
-	alphaOptions = sets.NewString()
-	betaOptions = sets.NewString(
+	alphaOptions = sets.New[string]()
+	betaOptions = sets.New[string](
 		PreferClosestNUMANodes,
 	)
-	stableOptions = sets.NewString()
+	stableOptions = sets.New[string]()
 )

 func CheckPolicyOptionAvailable(option string) error {
@@ -112,7 +112,7 @@ func TestNewTopologyManagerOptions(t *testing.T) {
 	}

 	betaOptions.Insert(fancyBetaOption)
-	alphaOptions = sets.NewString(fancyAlphaOption)
+	alphaOptions = sets.New[string](fancyAlphaOption)

 	for _, tcase := range testCases {
 		t.Run(tcase.description, func(t *testing.T) {