Merge pull request #120911 from gjkim42/devicemanager-remove-deprecated-sets-string

pkg/kubelet/cm: Remove deprecated sets.String and sets.Int
commit 0de29e1d43 by Kubernetes Prow Robot, 2023-10-16 16:48:40 +02:00 (committed by GitHub)
19 changed files with 168 additions and 166 deletions
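The change is largely mechanical: uses of the deprecated string- and int-specific set helpers in k8s.io/apimachinery/pkg/util/sets are replaced with the generic API (one call site whose external signature still requires the old sets.String is left with a FIXME in the tests). The sketch below illustrates the mapping this diff applies, with sets.NewString -> sets.New[string], sets.NewInt -> sets.New[int], sets.String -> sets.Set[string], and the sorted .List() method -> the package-level sets.List helper. The variable names are invented for the example and are not taken from the changed files.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Before (deprecated):
	//   devices := sets.NewString("dev2", "dev1")
	//   sorted := devices.List()
	//   pids := sets.NewInt(42)

	// After (generic):
	devices := sets.New[string]("dev2", "dev1") // sets.New("dev2", "dev1") also works via type inference
	devices.Insert("dev3")

	allocated := sets.New[string]("dev1")
	free := devices.Difference(allocated) // contains "dev2" and "dev3"

	// Sorted output moves from a method to a package-level helper.
	fmt.Println(sets.List(free))     // [dev2 dev3]
	fmt.Println(free.UnsortedList()) // same elements, unspecified order

	pids := sets.New[int](42) // replaces sets.NewInt
	fmt.Println(pids.Len(), pids.Has(42))
}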


@@ -801,7 +801,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
 NodeAllocatableConfig: cm.NodeAllocatableConfig{
 KubeReservedCgroupName: s.KubeReservedCgroup,
 SystemReservedCgroupName: s.SystemReservedCgroup,
-EnforceNodeAllocatable: sets.NewString(s.EnforceNodeAllocatable...),
+EnforceNodeAllocatable: sets.New(s.EnforceNodeAllocatable...),
 KubeReserved: kubeReserved,
 SystemReserved: systemReserved,
 ReservedSystemCPUs: reservedSystemCPUs,


@@ -246,7 +246,7 @@ func (m *cgroupManagerImpl) Validate(name CgroupName) error {
 }
 difference := neededControllers.Difference(enabledControllers)
 if difference.Len() > 0 {
-return fmt.Errorf("cgroup %q has some missing controllers: %v", name, strings.Join(difference.List(), ", "))
+return fmt.Errorf("cgroup %q has some missing controllers: %v", name, strings.Join(sets.List(difference), ", "))
 }
 return nil // valid V2 cgroup
 }
@@ -260,7 +260,7 @@ func (m *cgroupManagerImpl) Validate(name CgroupName) error {
 // scoped to the set control groups it understands. this is being discussed
 // in https://github.com/opencontainers/runc/issues/1440
 // once resolved, we can remove this code.
-allowlistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids")
+allowlistControllers := sets.New[string]("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids")
 if _, ok := m.subsystems.MountPoints["hugetlb"]; ok {
 allowlistControllers.Insert("hugetlb")
@@ -322,24 +322,24 @@ func getCPUWeight(cpuShares *uint64) uint64 {
 }
 // readUnifiedControllers reads the controllers available at the specified cgroup
-func readUnifiedControllers(path string) (sets.String, error) {
+func readUnifiedControllers(path string) (sets.Set[string], error) {
 controllersFileContent, err := os.ReadFile(filepath.Join(path, "cgroup.controllers"))
 if err != nil {
 return nil, err
 }
 controllers := strings.Fields(string(controllersFileContent))
-return sets.NewString(controllers...), nil
+return sets.New(controllers...), nil
 }
 var (
 availableRootControllersOnce sync.Once
-availableRootControllers sets.String
+availableRootControllers sets.Set[string]
 )
 // getSupportedUnifiedControllers returns a set of supported controllers when running on cgroup v2
-func getSupportedUnifiedControllers() sets.String {
+func getSupportedUnifiedControllers() sets.Set[string] {
 // This is the set of controllers used by the Kubelet
-supportedControllers := sets.NewString("cpu", "cpuset", "memory", "hugetlb", "pids")
+supportedControllers := sets.New("cpu", "cpuset", "memory", "hugetlb", "pids")
 // Memoize the set of controllers that are present in the root cgroup
 availableRootControllersOnce.Do(func() {
 var err error
@@ -407,7 +407,7 @@ func (m *cgroupManagerImpl) maybeSetHugetlb(resourceConfig *ResourceConfig, reso
 }
 // For each page size enumerated, set that value.
-pageSizes := sets.NewString()
+pageSizes := sets.New[string]()
 for pageSize, limit := range resourceConfig.HugePageLimit {
 sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize)
 if err != nil {
@@ -485,7 +485,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int {
 cgroupFsName := m.Name(name)
 // Get a list of processes that we need to kill
-pidsToKill := sets.NewInt()
+pidsToKill := sets.New[int]()
 var pids []int
 for _, val := range m.subsystems.MountPoints {
 dir := path.Join(val, cgroupFsName)
@@ -526,7 +526,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int {
 klog.V(4).InfoS("Cgroup manager encountered error scanning pids for directory", "path", dir, "err", err)
 }
 }
-return pidsToKill.List()
+return sets.List(pidsToKill)
 }
 // ReduceCPULimits reduces the cgroup's cpu shares to the lowest possible value


@@ -164,7 +164,7 @@ type NodeAllocatableConfig struct {
 KubeReservedCgroupName string
 SystemReservedCgroupName string
 ReservedSystemCPUs cpuset.CPUSet
-EnforceNodeAllocatable sets.String
+EnforceNodeAllocatable sets.Set[string]
 KubeReserved v1.ResourceList
 SystemReserved v1.ResourceList
 HardEvictionThresholds []evictionapi.Threshold


@@ -161,7 +161,7 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
 return f, nil
 }
-expectedCgroups := sets.NewString("cpu", "cpuacct", "cpuset", "memory")
+expectedCgroups := sets.New("cpu", "cpuacct", "cpuset", "memory")
 for _, mountPoint := range mountPoints {
 if mountPoint.Type == cgroupMountType {
 for _, opt := range mountPoint.Opts {
@@ -176,7 +176,7 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
 }
 if expectedCgroups.Len() > 0 {
-return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, expectedCgroups.List())
+return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, sets.List(expectedCgroups))
 }
 // Check if cpu quota is available.


@@ -35,14 +35,14 @@ const (
 )
 var (
-alphaOptions = sets.NewString(
+alphaOptions = sets.New[string](
 DistributeCPUsAcrossNUMAOption,
 AlignBySocketOption,
 )
-betaOptions = sets.NewString(
+betaOptions = sets.New[string](
 FullPCPUsOnlyOption,
 )
-stableOptions = sets.NewString()
+stableOptions = sets.New[string]()
 )
 // CheckPolicyOptionAvailable verifies if the given option can be used depending on the Feature Gate Settings.


@@ -62,9 +62,9 @@ func NewDevicesPerNUMA() DevicesPerNUMA {
 }
 // Devices is a function that returns all device ids for all NUMA nodes
-// and represent it as sets.String
-func (dev DevicesPerNUMA) Devices() sets.String {
-result := sets.NewString()
+// and represent it as sets.Set[string]
+func (dev DevicesPerNUMA) Devices() sets.Set[string] {
+result := sets.New[string]()
 for _, devs := range dev {
 result.Insert(devs...)


@@ -74,13 +74,13 @@ type ManagerImpl struct {
 allDevices ResourceDeviceInstances
 // healthyDevices contains all of the registered healthy resourceNames and their exported device IDs.
-healthyDevices map[string]sets.String
+healthyDevices map[string]sets.Set[string]
 // unhealthyDevices contains all of the unhealthy devices and their exported device IDs.
-unhealthyDevices map[string]sets.String
+unhealthyDevices map[string]sets.Set[string]
 // allocatedDevices contains allocated deviceIds, keyed by resourceName.
-allocatedDevices map[string]sets.String
+allocatedDevices map[string]sets.Set[string]
 // podDevices contains pod to allocated device mapping.
 podDevices *podDevices
@@ -106,7 +106,7 @@ type ManagerImpl struct {
 // containerRunningSet identifies which container among those present in `containerMap`
 // was reported running by the container runtime when `containerMap` was computed.
 // Used to detect pods running across a restart
-containerRunningSet sets.String
+containerRunningSet sets.Set[string]
 }
 type endpointInfo struct {
@@ -117,7 +117,7 @@ type endpointInfo struct {
 type sourcesReadyStub struct{}
 // PodReusableDevices is a map by pod name of devices to reuse.
-type PodReusableDevices map[string]map[string]sets.String
+type PodReusableDevices map[string]map[string]sets.Set[string]
 func (s *sourcesReadyStub) AddSource(source string) {}
 func (s *sourcesReadyStub) AllReady() bool { return true }
@@ -143,9 +143,9 @@ func newManagerImpl(socketPath string, topology []cadvisorapi.Node, topologyAffi
 endpoints: make(map[string]endpointInfo),
 allDevices: NewResourceDeviceInstances(),
-healthyDevices: make(map[string]sets.String),
-unhealthyDevices: make(map[string]sets.String),
-allocatedDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
+unhealthyDevices: make(map[string]sets.Set[string]),
+allocatedDevices: make(map[string]sets.Set[string]),
 podDevices: newPodDevices(),
 numaNodes: numaNodes,
 topologyAffinityStore: topologyAffinityStore,
@@ -259,8 +259,8 @@ func (m *ManagerImpl) PluginListAndWatchReceiver(resourceName string, resp *plug
 func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, devices []pluginapi.Device) {
 healthyCount := 0
 m.mutex.Lock()
-m.healthyDevices[resourceName] = sets.NewString()
-m.unhealthyDevices[resourceName] = sets.NewString()
+m.healthyDevices[resourceName] = sets.New[string]()
+m.unhealthyDevices[resourceName] = sets.New[string]()
 m.allDevices[resourceName] = make(map[string]pluginapi.Device)
 for _, dev := range devices {
 m.allDevices[resourceName][dev.ID] = dev
@@ -291,7 +291,7 @@ func (m *ManagerImpl) checkpointFile() string {
 // Start starts the Device Plugin Manager and start initialization of
 // podDevices and allocatedDevices information from checkpointed state and
 // starts device plugin registration service.
-func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.String) error {
+func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.Set[string]) error {
 klog.V(2).InfoS("Starting Device Plugin manager")
 m.activePods = activePods
@@ -323,7 +323,7 @@ func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
 m.setPodPendingAdmission(pod)
 if _, ok := m.devicesToReuse[string(pod.UID)]; !ok {
-m.devicesToReuse[string(pod.UID)] = make(map[string]sets.String)
+m.devicesToReuse[string(pod.UID)] = make(map[string]sets.Set[string])
 }
 // If pod entries to m.devicesToReuse other than the current pod exist, delete them.
 for podUID := range m.devicesToReuse {
@@ -365,13 +365,13 @@ func (m *ManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo, a
 func (m *ManagerImpl) markResourceUnhealthy(resourceName string) {
 klog.V(2).InfoS("Mark all resources Unhealthy for resource", "resourceName", resourceName)
-healthyDevices := sets.NewString()
+healthyDevices := sets.New[string]()
 if _, ok := m.healthyDevices[resourceName]; ok {
 healthyDevices = m.healthyDevices[resourceName]
-m.healthyDevices[resourceName] = sets.NewString()
+m.healthyDevices[resourceName] = sets.New[string]()
 }
 if _, ok := m.unhealthyDevices[resourceName]; !ok {
-m.unhealthyDevices[resourceName] = sets.NewString()
+m.unhealthyDevices[resourceName] = sets.New[string]()
 }
 m.unhealthyDevices[resourceName] = m.unhealthyDevices[resourceName].Union(healthyDevices)
 }
@@ -392,7 +392,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
 needsUpdateCheckpoint := false
 var capacity = v1.ResourceList{}
 var allocatable = v1.ResourceList{}
-deletedResources := sets.NewString()
+deletedResources := sets.New[string]()
 m.mutex.Lock()
 for resourceName, devices := range m.healthyDevices {
 eI, ok := m.endpoints[resourceName]
@@ -492,8 +492,8 @@ func (m *ManagerImpl) readCheckpoint() error {
 for resource := range registeredDevs {
 // During start up, creates empty healthyDevices list so that the resource capacity
 // will stay zero till the corresponding device plugin re-registers.
-m.healthyDevices[resource] = sets.NewString()
-m.unhealthyDevices[resource] = sets.NewString()
+m.healthyDevices[resource] = sets.New[string]()
+m.unhealthyDevices[resource] = sets.New[string]()
 m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil}
 }
 return nil
@@ -536,15 +536,15 @@ func (m *ManagerImpl) UpdateAllocatedDevices() {
 if len(podsToBeRemoved) <= 0 {
 return
 }
-klog.V(3).InfoS("Pods to be removed", "podUIDs", podsToBeRemoved.List())
-m.podDevices.delete(podsToBeRemoved.List())
+klog.V(3).InfoS("Pods to be removed", "podUIDs", sets.List(podsToBeRemoved))
+m.podDevices.delete(sets.List(podsToBeRemoved))
 // Regenerated allocatedDevices after we update pod allocation information.
 m.allocatedDevices = m.podDevices.devices()
 }
 // Returns list of device Ids we need to allocate with Allocate rpc call.
 // Returns empty list in case we don't need to issue the Allocate rpc call.
-func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.String) (sets.String, error) {
+func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.Set[string]) (sets.Set[string], error) {
 m.mutex.Lock()
 defer m.mutex.Unlock()
 needed := required
@@ -552,7 +552,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 // This can happen if a container restarts for example.
 devices := m.podDevices.containerDevices(podUID, contName, resource)
 if devices != nil {
-klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", string(podUID), "devices", devices.List())
+klog.V(3).InfoS("Found pre-allocated devices for resource on pod", "resourceName", resource, "containerName", contName, "podUID", string(podUID), "devices", sets.List(devices))
 needed = needed - devices.Len()
 // A pod's resource is not expected to change once admitted by the API server,
 // so just fail loudly here. We can revisit this part if this no longer holds.
@@ -610,11 +610,11 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 // Declare the list of allocated devices.
 // This will be populated and returned below.
-allocated := sets.NewString()
+allocated := sets.New[string]()
 // Create a closure to help with device allocation
 // Returns 'true' once no more devices need to be allocated.
-allocateRemainingFrom := func(devices sets.String) bool {
+allocateRemainingFrom := func(devices sets.Set[string]) bool {
 for device := range devices.Difference(allocated) {
 m.allocatedDevices[resource].Insert(device)
 allocated.Insert(device)
@@ -628,7 +628,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 // Needs to allocate additional devices.
 if m.allocatedDevices[resource] == nil {
-m.allocatedDevices[resource] = sets.NewString()
+m.allocatedDevices[resource] = sets.New[string]()
 }
 // Allocates from reusableDevices list first.
@@ -697,22 +697,22 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 return nil, fmt.Errorf("unexpectedly allocated less resources than required. Requested: %d, Got: %d", required, required-needed)
 }
-func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, available sets.String) (sets.String, sets.String, sets.String) {
+func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, available sets.Set[string]) (sets.Set[string], sets.Set[string], sets.Set[string]) {
 // If alignment information is not available, just pass the available list back.
 hint := m.topologyAffinityStore.GetAffinity(podUID, contName)
 if !m.deviceHasTopologyAlignment(resource) || hint.NUMANodeAffinity == nil {
-return sets.NewString(), sets.NewString(), available
+return sets.New[string](), sets.New[string](), available
 }
 // Build a map of NUMA Nodes to the devices associated with them. A
 // device may be associated to multiple NUMA nodes at the same time. If an
 // available device does not have any NUMA Nodes associated with it, add it
 // to a list of NUMA Nodes for the fake NUMANode -1.
-perNodeDevices := make(map[int]sets.String)
+perNodeDevices := make(map[int]sets.Set[string])
 for d := range available {
 if m.allDevices[resource][d].Topology == nil || len(m.allDevices[resource][d].Topology.Nodes) == 0 {
 if _, ok := perNodeDevices[nodeWithoutTopology]; !ok {
-perNodeDevices[nodeWithoutTopology] = sets.NewString()
+perNodeDevices[nodeWithoutTopology] = sets.New[string]()
 }
 perNodeDevices[nodeWithoutTopology].Insert(d)
 continue
@@ -720,7 +720,7 @@ func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, availa
 for _, node := range m.allDevices[resource][d].Topology.Nodes {
 if _, ok := perNodeDevices[int(node.ID)]; !ok {
-perNodeDevices[int(node.ID)] = sets.NewString()
+perNodeDevices[int(node.ID)] = sets.New[string]()
 }
 perNodeDevices[int(node.ID)].Insert(d)
 }
@@ -791,14 +791,14 @@ func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, availa
 }
 // Return all three lists containing the full set of devices across them.
-return sets.NewString(fromAffinity...), sets.NewString(notFromAffinity...), sets.NewString(withoutTopology...)
+return sets.New[string](fromAffinity...), sets.New[string](notFromAffinity...), sets.New[string](withoutTopology...)
 }
 // allocateContainerResources attempts to allocate all of required device
 // plugin resources for the input container, issues an Allocate rpc request
 // for each new device resource requirement, processes their AllocateResponses,
 // and updates the cached containerDevices on success.
-func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.String) error {
+func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.Set[string]) error {
 podUID := string(pod.UID)
 contName := container.Name
 allocatedDevicesUpdated := false
@@ -981,7 +981,7 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
 // callGetPreferredAllocationIfAvailable issues GetPreferredAllocation grpc
 // call for device plugin resource with GetPreferredAllocationAvailable option set.
-func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, resource string, available, mustInclude sets.String, size int) (sets.String, error) {
+func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, resource string, available, mustInclude sets.Set[string], size int) (sets.Set[string], error) {
 eI, ok := m.endpoints[resource]
 if !ok {
 return nil, fmt.Errorf("endpoint not found in cache for a registered resource: %s", resource)
@@ -1000,9 +1000,9 @@ func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, re
 return nil, fmt.Errorf("device plugin GetPreferredAllocation rpc failed with err: %v", err)
 }
 if resp != nil && len(resp.ContainerResponses) > 0 {
-return sets.NewString(resp.ContainerResponses[0].DeviceIDs...), nil
+return sets.New[string](resp.ContainerResponses[0].DeviceIDs...), nil
 }
-return sets.NewString(), nil
+return sets.New[string](), nil
 }
 // sanitizeNodeAllocatable scans through allocatedDevices in the device manager


@@ -287,7 +287,7 @@ func setupDeviceManager(t *testing.T, devs []*pluginapi.Device, callback monitor
 // test steady state, initialization where sourcesReady, containerMap and containerRunningSet
 // are relevant will be tested with a different flow
-err = w.Start(activePods, &sourcesReadyStub{}, containermap.NewContainerMap(), sets.NewString())
+err = w.Start(activePods, &sourcesReadyStub{}, containermap.NewContainerMap(), sets.New[string]())
 require.NoError(t, err)
 return w, updateChan
@@ -312,6 +312,7 @@ func setupPluginManager(t *testing.T, pluginSocketName string, m Manager) plugin
 }
 func runPluginManager(pluginManager pluginmanager.PluginManager) {
+// FIXME: Replace sets.String with sets.Set[string]
 sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
 go pluginManager.Run(sourcesReady, wait.NeverStop)
 }
@@ -459,8 +460,8 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
 // properly rejected instead of being incorrectly started.
 err = testManager.writeCheckpoint()
 as.Nil(err)
-testManager.healthyDevices = make(map[string]sets.String)
-testManager.unhealthyDevices = make(map[string]sets.String)
+testManager.healthyDevices = make(map[string]sets.Set[string])
+testManager.unhealthyDevices = make(map[string]sets.Set[string])
 err = testManager.readCheckpoint()
 as.Nil(err)
 as.Equal(1, len(testManager.endpoints))
@@ -673,9 +674,9 @@ func TestCheckpoint(t *testing.T) {
 as.Nil(err)
 testManager := &ManagerImpl{
 endpoints: make(map[string]endpointInfo),
-healthyDevices: make(map[string]sets.String),
-unhealthyDevices: make(map[string]sets.String),
-allocatedDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
+unhealthyDevices: make(map[string]sets.Set[string]),
+allocatedDevices: make(map[string]sets.Set[string]),
 podDevices: newPodDevices(),
 checkpointManager: ckm,
 }
@@ -718,16 +719,16 @@ func TestCheckpoint(t *testing.T) {
 ),
 )
-testManager.healthyDevices[resourceName1] = sets.NewString()
+testManager.healthyDevices[resourceName1] = sets.New[string]()
 testManager.healthyDevices[resourceName1].Insert("dev1")
 testManager.healthyDevices[resourceName1].Insert("dev2")
 testManager.healthyDevices[resourceName1].Insert("dev3")
 testManager.healthyDevices[resourceName1].Insert("dev4")
 testManager.healthyDevices[resourceName1].Insert("dev5")
-testManager.healthyDevices[resourceName2] = sets.NewString()
+testManager.healthyDevices[resourceName2] = sets.New[string]()
 testManager.healthyDevices[resourceName2].Insert("dev1")
 testManager.healthyDevices[resourceName2].Insert("dev2")
-testManager.healthyDevices[resourceName3] = sets.NewString()
+testManager.healthyDevices[resourceName3] = sets.New[string]()
 testManager.healthyDevices[resourceName3].Insert("dev5")
 expectedPodDevices := testManager.podDevices
@@ -827,9 +828,9 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
 return nil, err
 }
 m := &ManagerImpl{
-healthyDevices: make(map[string]sets.String),
-unhealthyDevices: make(map[string]sets.String),
-allocatedDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
+unhealthyDevices: make(map[string]sets.Set[string]),
+allocatedDevices: make(map[string]sets.Set[string]),
 endpoints: make(map[string]endpointInfo),
 podDevices: newPodDevices(),
 devicesToReuse: make(PodReusableDevices),
@@ -846,7 +847,7 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
 }
 for _, res := range testRes {
-testManager.healthyDevices[res.resourceName] = sets.NewString(res.devs.Devices().UnsortedList()...)
+testManager.healthyDevices[res.resourceName] = sets.New[string](res.devs.Devices().UnsortedList()...)
 if res.resourceName == "domain1.com/resource1" {
 testManager.endpoints[res.resourceName] = endpointInfo{
 e: &MockEndpoint{allocateFunc: allocateStubFunc()},
@@ -953,22 +954,22 @@ func TestFilterByAffinity(t *testing.T) {
 }
 testCases := []struct {
-available sets.String
-fromAffinityExpected sets.String
-notFromAffinityExpected sets.String
-withoutTopologyExpected sets.String
+available sets.Set[string]
+fromAffinityExpected sets.Set[string]
+notFromAffinityExpected sets.Set[string]
+withoutTopologyExpected sets.Set[string]
 }{
 {
-available: sets.NewString("dev1", "dev2"),
-fromAffinityExpected: sets.NewString("dev2"),
-notFromAffinityExpected: sets.NewString("dev1"),
-withoutTopologyExpected: sets.NewString(),
+available: sets.New[string]("dev1", "dev2"),
+fromAffinityExpected: sets.New[string]("dev2"),
+notFromAffinityExpected: sets.New[string]("dev1"),
+withoutTopologyExpected: sets.New[string](),
 },
 {
-available: sets.NewString("dev1", "dev2", "dev3", "dev4"),
-fromAffinityExpected: sets.NewString("dev2", "dev3", "dev4"),
-notFromAffinityExpected: sets.NewString("dev1"),
-withoutTopologyExpected: sets.NewString(),
+available: sets.New[string]("dev1", "dev2", "dev3", "dev4"),
+fromAffinityExpected: sets.New[string]("dev2", "dev3", "dev4"),
+notFromAffinityExpected: sets.New[string]("dev1"),
+withoutTopologyExpected: sets.New[string](),
 },
 }
@@ -1087,9 +1088,9 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 testManager := &ManagerImpl{
 endpoints: make(map[string]endpointInfo),
-healthyDevices: make(map[string]sets.String),
-unhealthyDevices: make(map[string]sets.String),
-allocatedDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
+unhealthyDevices: make(map[string]sets.Set[string]),
+allocatedDevices: make(map[string]sets.Set[string]),
 podDevices: newPodDevices(),
 activePods: func() []*v1.Pod { return []*v1.Pod{} },
 sourcesReady: &sourcesReadyStub{},
@@ -1121,8 +1122,8 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 // no healthy devices for resourceName1 and devices corresponding to
 // resource2 are intentionally omitted to simulate that the resource
 // hasn't been registered.
-testManager.healthyDevices[resourceName1] = sets.NewString()
-testManager.healthyDevices[resourceName3] = sets.NewString()
+testManager.healthyDevices[resourceName1] = sets.New[string]()
+testManager.healthyDevices[resourceName3] = sets.New[string]()
 // dev5 is no longer in the list of healthy devices
 testManager.healthyDevices[resourceName3].Insert("dev7")
 testManager.healthyDevices[resourceName3].Insert("dev8")
@@ -1133,8 +1134,8 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 contName string
 resource string
 required int
-reusableDevices sets.String
-expectedAllocatedDevices sets.String
+reusableDevices sets.Set[string]
+expectedAllocatedDevices sets.Set[string]
 expErr error
 }{
 {
@@ -1143,7 +1144,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 contName: "con1",
 resource: resourceName1,
 required: 2,
-reusableDevices: sets.NewString(),
+reusableDevices: sets.New[string](),
 expectedAllocatedDevices: nil,
 expErr: fmt.Errorf("no healthy devices present; cannot allocate unhealthy devices %s", resourceName1),
 },
@@ -1153,7 +1154,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 contName: "con2",
 resource: resourceName2,
 required: 1,
-reusableDevices: sets.NewString(),
+reusableDevices: sets.New[string](),
 expectedAllocatedDevices: nil,
 expErr: fmt.Errorf("cannot allocate unregistered device %s", resourceName2),
 },
@@ -1163,7 +1164,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 contName: "con3",
 resource: resourceName3,
 required: 1,
-reusableDevices: sets.NewString(),
+reusableDevices: sets.New[string](),
 expectedAllocatedDevices: nil,
 expErr: fmt.Errorf("previously allocated devices are no longer healthy; cannot allocate unhealthy devices %s", resourceName3),
 },
@@ -1366,8 +1367,8 @@ func TestUpdatePluginResources(t *testing.T) {
 ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
 as.Nil(err)
 m := &ManagerImpl{
-allocatedDevices: make(map[string]sets.String),
-healthyDevices: make(map[string]sets.String),
+allocatedDevices: make(map[string]sets.Set[string]),
+healthyDevices: make(map[string]sets.Set[string]),
 podDevices: newPodDevices(),
 checkpointManager: ckm,
 }
@@ -1378,9 +1379,9 @@ func TestUpdatePluginResources(t *testing.T) {
 testManager.podDevices.devs[string(pod.UID)] = make(containerDevices)
 // require one of resource1 and one of resource2
-testManager.allocatedDevices[resourceName1] = sets.NewString()
+testManager.allocatedDevices[resourceName1] = sets.New[string]()
 testManager.allocatedDevices[resourceName1].Insert(devID1)
-testManager.allocatedDevices[resourceName2] = sets.NewString()
+testManager.allocatedDevices[resourceName2] = sets.New[string]()
 testManager.allocatedDevices[resourceName2].Insert(devID2)
 cachedNode := &v1.Node{
@@ -1486,9 +1487,9 @@ func TestResetExtendedResource(t *testing.T) {
 as.Nil(err)
 testManager := &ManagerImpl{
 endpoints: make(map[string]endpointInfo),
-healthyDevices: make(map[string]sets.String),
-unhealthyDevices: make(map[string]sets.String),
-allocatedDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
+unhealthyDevices: make(map[string]sets.Set[string]),
+allocatedDevices: make(map[string]sets.Set[string]),
 podDevices: newPodDevices(),
 checkpointManager: ckm,
 }
@@ -1502,7 +1503,7 @@ func TestResetExtendedResource(t *testing.T) {
 ),
 )
-testManager.healthyDevices[extendedResourceName] = sets.NewString()
+testManager.healthyDevices[extendedResourceName] = sets.New[string]()
 testManager.healthyDevices[extendedResourceName].Insert("dev1")
 // checkpoint is present, indicating node hasn't been recreated
 err = testManager.writeCheckpoint()


@@ -52,10 +52,10 @@ func newPodDevices() *podDevices {
 return &podDevices{devs: make(map[string]containerDevices)}
 }
-func (pdev *podDevices) pods() sets.String {
+func (pdev *podDevices) pods() sets.Set[string] {
 pdev.RLock()
 defer pdev.RUnlock()
-ret := sets.NewString()
+ret := sets.New[string]()
 for k := range pdev.devs {
 ret.Insert(k)
 }
@@ -100,11 +100,11 @@ func (pdev *podDevices) delete(pods []string) {
 // Returns list of device Ids allocated to the given pod for the given resource.
 // Returns nil if we don't have cached state for the given <podUID, resource>.
-func (pdev *podDevices) podDevices(podUID, resource string) sets.String {
+func (pdev *podDevices) podDevices(podUID, resource string) sets.Set[string] {
 pdev.RLock()
 defer pdev.RUnlock()
-ret := sets.NewString()
+ret := sets.New[string]()
 for contName := range pdev.devs[podUID] {
 ret = ret.Union(pdev.containerDevices(podUID, contName, resource))
 }
@@ -113,7 +113,7 @@ func (pdev *podDevices) podDevices(podUID, resource string) sets.String {
 // Returns list of device Ids allocated to the given container for the given resource.
 // Returns nil if we don't have cached state for the given <podUID, contName, resource>.
-func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets.String {
+func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets.Set[string] {
 pdev.RLock()
 defer pdev.RUnlock()
 if _, podExists := pdev.devs[podUID]; !podExists {
@@ -130,7 +130,7 @@ func (pdev *podDevices) containerDevices(podUID, contName, resource string) sets
 }
 // Populates allocatedResources with the device resources allocated to the specified <podUID, contName>.
-func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) {
+func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) {
 pdev.RLock()
 defer pdev.RUnlock()
 containers, exists := pdev.devs[podUID]
@@ -147,7 +147,7 @@ func (pdev *podDevices) addContainerAllocatedResources(podUID, contName string,
 }
 // Removes the device resources allocated to the specified <podUID, contName> from allocatedResources.
-func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) {
+func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.Set[string]) {
 pdev.RLock()
 defer pdev.RUnlock()
 containers, exists := pdev.devs[podUID]
@@ -164,15 +164,15 @@ func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName strin
 }
 // Returns all of devices allocated to the pods being tracked, keyed by resourceName.
-func (pdev *podDevices) devices() map[string]sets.String {
-ret := make(map[string]sets.String)
+func (pdev *podDevices) devices() map[string]sets.Set[string] {
+ret := make(map[string]sets.Set[string])
 pdev.RLock()
 defer pdev.RUnlock()
 for _, containerDevices := range pdev.devs {
 for _, resources := range containerDevices {
 for resource, devices := range resources {
 if _, exists := ret[resource]; !exists {
-ret[resource] = sets.NewString()
+ret[resource] = sets.New[string]()
 }
 if devices.allocResp != nil {
 ret[resource] = ret[resource].Union(devices.deviceIds.Devices())
@@ -464,9 +464,9 @@ func (rdev ResourceDeviceInstances) Clone() ResourceDeviceInstances {
 return clone
 }
-// Filter takes a condition set expressed as map[string]sets.String and returns a new
+// Filter takes a condition set expressed as map[string]sets.Set[string] and returns a new
 // ResourceDeviceInstances with only the devices matching the condition set.
-func (rdev ResourceDeviceInstances) Filter(cond map[string]sets.String) ResourceDeviceInstances {
+func (rdev ResourceDeviceInstances) Filter(cond map[string]sets.Set[string]) ResourceDeviceInstances {
 filtered := NewResourceDeviceInstances()
 for resourceName, filterIDs := range cond {
 if _, exists := rdev[resourceName]; !exists {


@@ -65,7 +65,7 @@ func TestGetContainerDevices(t *testing.T) {
 func TestResourceDeviceInstanceFilter(t *testing.T) {
 var expected string
-var cond map[string]sets.String
+var cond map[string]sets.Set[string]
 var resp ResourceDeviceInstances
 devs := ResourceDeviceInstances{
 "foo": DeviceInstances{
@@ -103,40 +103,40 @@ func TestResourceDeviceInstanceFilter(t *testing.T) {
 },
 }
-resp = devs.Filter(map[string]sets.String{})
+resp = devs.Filter(map[string]sets.Set[string]{})
 expected = `{}`
 expectResourceDeviceInstances(t, resp, expected)
-cond = map[string]sets.String{
-"foo": sets.NewString("dev-foo1", "dev-foo2"),
-"bar": sets.NewString("dev-bar1"),
+cond = map[string]sets.Set[string]{
+"foo": sets.New[string]("dev-foo1", "dev-foo2"),
+"bar": sets.New[string]("dev-bar1"),
 }
 resp = devs.Filter(cond)
 expected = `{"bar":{"dev-bar1":{"ID":"bar1"}},"foo":{"dev-foo1":{"ID":"foo1"},"dev-foo2":{"ID":"foo2"}}}`
 expectResourceDeviceInstances(t, resp, expected)
-cond = map[string]sets.String{
-"foo": sets.NewString("dev-foo1", "dev-foo2", "dev-foo3"),
-"bar": sets.NewString("dev-bar1", "dev-bar2", "dev-bar3"),
-"baz": sets.NewString("dev-baz1", "dev-baz2", "dev-baz3"),
+cond = map[string]sets.Set[string]{
+"foo": sets.New[string]("dev-foo1", "dev-foo2", "dev-foo3"),
+"bar": sets.New[string]("dev-bar1", "dev-bar2", "dev-bar3"),
+"baz": sets.New[string]("dev-baz1", "dev-baz2", "dev-baz3"),
 }
 resp = devs.Filter(cond)
 expected = `{"bar":{"dev-bar1":{"ID":"bar1"},"dev-bar2":{"ID":"bar2"},"dev-bar3":{"ID":"bar3"}},"baz":{"dev-baz1":{"ID":"baz1"},"dev-baz2":{"ID":"baz2"},"dev-baz3":{"ID":"baz3"}},"foo":{"dev-foo1":{"ID":"foo1"},"dev-foo2":{"ID":"foo2"},"dev-foo3":{"ID":"foo3"}}}`
 expectResourceDeviceInstances(t, resp, expected)
-cond = map[string]sets.String{
-"foo": sets.NewString("dev-foo1", "dev-foo2", "dev-foo3", "dev-foo4"),
-"bar": sets.NewString("dev-bar1", "dev-bar2", "dev-bar3", "dev-bar4"),
-"baz": sets.NewString("dev-baz1", "dev-baz2", "dev-baz3", "dev-bar4"),
+cond = map[string]sets.Set[string]{
+"foo": sets.New[string]("dev-foo1", "dev-foo2", "dev-foo3", "dev-foo4"),
+"bar": sets.New[string]("dev-bar1", "dev-bar2", "dev-bar3", "dev-bar4"),
+"baz": sets.New[string]("dev-baz1", "dev-baz2", "dev-baz3", "dev-bar4"),
 }
 resp = devs.Filter(cond)
 expected = `{"bar":{"dev-bar1":{"ID":"bar1"},"dev-bar2":{"ID":"bar2"},"dev-bar3":{"ID":"bar3"}},"baz":{"dev-baz1":{"ID":"baz1"},"dev-baz2":{"ID":"baz2"},"dev-baz3":{"ID":"baz3"}},"foo":{"dev-foo1":{"ID":"foo1"},"dev-foo2":{"ID":"foo2"},"dev-foo3":{"ID":"foo3"}}}`
 expectResourceDeviceInstances(t, resp, expected)
-cond = map[string]sets.String{
-"foo": sets.NewString("dev-foo1", "dev-foo4", "dev-foo7"),
-"bar": sets.NewString("dev-bar1", "dev-bar4", "dev-bar7"),
-"baz": sets.NewString("dev-baz1", "dev-baz4", "dev-baz7"),
+cond = map[string]sets.Set[string]{
+"foo": sets.New[string]("dev-foo1", "dev-foo4", "dev-foo7"),
+"bar": sets.New[string]("dev-bar1", "dev-bar4", "dev-bar7"),
+"baz": sets.New[string]("dev-baz1", "dev-baz4", "dev-baz7"),
 }
 resp = devs.Filter(cond)
 expected = `{"bar":{"dev-bar1":{"ID":"bar1"}},"baz":{"dev-baz1":{"ID":"baz1"}},"foo":{"dev-foo1":{"ID":"foo1"}}}`


@@ -63,7 +63,7 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
 continue
 }
 klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resource", resource, "pod", klog.KObj(pod), "containerName", container.Name)
-deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.String{}, requested)
+deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested)
 continue
 }
@@ -118,7 +118,7 @@ func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana
 continue
 }
 klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resource", resource, "pod", klog.KObj(pod))
-deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.String{}, requested)
+deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested)
 continue
 }
@@ -132,7 +132,7 @@ func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana
 // Generate TopologyHints for this resource given the current
 // request size and the list of available devices.
-deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, sets.String{}, requested)
+deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, sets.Set[string]{}, requested)
 }
 return deviceHints
@@ -148,12 +148,12 @@ func (m *ManagerImpl) deviceHasTopologyAlignment(resource string) bool {
 return false
 }
-func (m *ManagerImpl) getAvailableDevices(resource string) sets.String {
+func (m *ManagerImpl) getAvailableDevices(resource string) sets.Set[string] {
 // Strip all devices in use from the list of healthy ones.
 return m.healthyDevices[resource].Difference(m.allocatedDevices[resource])
 }
-func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available sets.String, reusable sets.String, request int) []topologymanager.TopologyHint {
+func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available sets.Set[string], reusable sets.Set[string], request int) []topologymanager.TopologyHint {
 // Initialize minAffinitySize to include all NUMA Nodes
 minAffinitySize := len(m.numaNodes)


@@ -61,8 +61,8 @@ func TestGetTopologyHints(t *testing.T) {
 for _, tc := range tcases {
 m := ManagerImpl{
 allDevices: NewResourceDeviceInstances(),
-healthyDevices: make(map[string]sets.String),
-allocatedDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
+allocatedDevices: make(map[string]sets.Set[string]),
 podDevices: newPodDevices(),
 sourcesReady: &sourcesReadyStub{},
 activePods: func() []*v1.Pod { return []*v1.Pod{tc.pod} },
@@ -71,7 +71,7 @@ func TestGetTopologyHints(t *testing.T) {
 for r := range tc.devices {
 m.allDevices[r] = make(DeviceInstances)
-m.healthyDevices[r] = sets.NewString()
+m.healthyDevices[r] = sets.New[string]()
 for _, d := range tc.devices[r] {
 m.allDevices[r][d.ID] = d
@@ -84,7 +84,7 @@ func TestGetTopologyHints(t *testing.T) {
 for r, devices := range tc.allocatedDevices[p][c] {
 m.podDevices.insert(p, c, r, constructDevices(devices), nil)
-m.allocatedDevices[r] = sets.NewString()
+m.allocatedDevices[r] = sets.New[string]()
 for _, d := range devices {
 m.allocatedDevices[r].Insert(d)
 }
@@ -414,8 +414,8 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 for _, tc := range tcases {
 m := ManagerImpl{
 allDevices: NewResourceDeviceInstances(),
-healthyDevices: make(map[string]sets.String),
-allocatedDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
+allocatedDevices: make(map[string]sets.Set[string]),
 endpoints: make(map[string]endpointInfo),
 podDevices: newPodDevices(),
 sourcesReady: &sourcesReadyStub{},
@@ -424,7 +424,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 }
 m.allDevices[tc.resource] = make(DeviceInstances)
-m.healthyDevices[tc.resource] = sets.NewString()
+m.healthyDevices[tc.resource] = sets.New[string]()
 m.endpoints[tc.resource] = endpointInfo{}
 for _, d := range tc.devices {
@@ -441,7 +441,7 @@ func TestTopologyAlignedAllocation(t *testing.T) {
 }
 }
-allocated, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.NewString())
+allocated, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.New[string]())
 if err != nil {
 t.Errorf("Unexpected error: %v", err)
 continue
@@ -603,8 +603,8 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 for _, tc := range tcases {
 m := ManagerImpl{
 allDevices: NewResourceDeviceInstances(),
-healthyDevices: make(map[string]sets.String),
-allocatedDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
+allocatedDevices: make(map[string]sets.Set[string]),
 endpoints: make(map[string]endpointInfo),
 podDevices: newPodDevices(),
 sourcesReady: &sourcesReadyStub{},
@@ -613,13 +613,13 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 }
 m.allDevices[tc.resource] = make(DeviceInstances)
-m.healthyDevices[tc.resource] = sets.NewString()
+m.healthyDevices[tc.resource] = sets.New[string]()
 for _, d := range tc.allDevices {
 m.allDevices[tc.resource][d.ID] = d
 m.healthyDevices[tc.resource].Insert(d.ID)
 }
-m.allocatedDevices[tc.resource] = sets.NewString()
+m.allocatedDevices[tc.resource] = sets.New[string]()
 for _, d := range tc.allocatedDevices {
 m.allocatedDevices[tc.resource].Insert(d)
 }
@@ -639,17 +639,17 @@ func TestGetPreferredAllocationParameters(t *testing.T) {
 opts: &pluginapi.DevicePluginOptions{GetPreferredAllocationAvailable: true},
 }
-_, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.NewString(tc.reusableDevices...))
+_, err := m.devicesToAllocate("podUID", "containerName", tc.resource, tc.request, sets.New[string](tc.reusableDevices...))
 if err != nil {
 t.Errorf("Unexpected error: %v", err)
 continue
 }
-if !sets.NewString(actualAvailable...).Equal(sets.NewString(tc.expectedAvailable...)) {
+if !sets.New[string](actualAvailable...).Equal(sets.New[string](tc.expectedAvailable...)) {
 t.Errorf("%v. expected available: %v but got: %v", tc.description, tc.expectedAvailable, actualAvailable)
 }
-if !sets.NewString(actualAvailable...).Equal(sets.NewString(tc.expectedAvailable...)) {
+if !sets.New[string](actualAvailable...).Equal(sets.New[string](tc.expectedAvailable...)) {
 t.Errorf("%v. expected mustInclude: %v but got: %v", tc.description, tc.expectedMustInclude, actualMustInclude)
 }
@@ -903,11 +903,11 @@ func TestGetPodDeviceRequest(t *testing.T) {
 for _, tc := range tcases {
 m := ManagerImpl{
-healthyDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
 }
 for _, res := range tc.registeredDevices {
-m.healthyDevices[res] = sets.NewString()
+m.healthyDevices[res] = sets.New[string]()
 }
 accumulatedResourceRequests := m.getPodDeviceRequest(tc.pod)
@@ -925,8 +925,8 @@ func TestGetPodTopologyHints(t *testing.T) {
 for _, tc := range tcases {
 m := ManagerImpl{
 allDevices: NewResourceDeviceInstances(),
-healthyDevices: make(map[string]sets.String),
-allocatedDevices: make(map[string]sets.String),
+healthyDevices: make(map[string]sets.Set[string]),
+allocatedDevices: make(map[string]sets.Set[string]),
 podDevices: newPodDevices(),
 sourcesReady: &sourcesReadyStub{},
 activePods: func() []*v1.Pod { return []*v1.Pod{tc.pod, {ObjectMeta: metav1.ObjectMeta{UID: "fakeOtherPod"}}} },
@@ -935,7 +935,7 @@ func TestGetPodTopologyHints(t *testing.T) {
 for r := range tc.devices {
 m.allDevices[r] = make(DeviceInstances)
-m.healthyDevices[r] = sets.NewString()
+m.healthyDevices[r] = sets.New[string]()
 for _, d := range tc.devices[r] {
 //add `pluginapi.Device` with Topology
@@ -949,7 +949,7 @@ func TestGetPodTopologyHints(t *testing.T) {
 for r, devices := range tc.allocatedDevices[p][c] {
 m.podDevices.insert(p, c, r, constructDevices(devices), nil)
m.allocatedDevices[r] = sets.NewString() m.allocatedDevices[r] = sets.New[string]()
for _, d := range devices { for _, d := range devices {
m.allocatedDevices[r].Insert(d) m.allocatedDevices[r].Insert(d)
} }
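The test hunks above all follow the same mechanical substitution: `sets.NewString()` becomes `sets.New[string]()` and `map[string]sets.String` becomes `map[string]sets.Set[string]`. The short sketch below is not part of the PR; the resource and device names are invented, and it assumes `k8s.io/apimachinery` is available as a module dependency. It only illustrates the generic API the updated tests rely on.

```go
// Minimal sketch of the generic set operations used by the updated tests:
// typed construction, Insert, Equal, and map-of-set bookkeeping.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// map[string]sets.Set[string] replaces map[string]sets.String.
	healthyDevices := make(map[string]sets.Set[string])

	// sets.New[string]() replaces sets.NewString(); the element type is now a type parameter.
	healthyDevices["example.com/gpu"] = sets.New[string]()
	healthyDevices["example.com/gpu"].Insert("dev-0", "dev-1")

	// Equality is order-independent, as before.
	want := sets.New[string]("dev-1", "dev-0")
	fmt.Println(healthyDevices["example.com/gpu"].Equal(want)) // true
}
```

The behavior is unchanged; only the element type moves from a dedicated `sets.String` type into a type parameter.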


@@ -33,7 +33,7 @@ import (
 // Manager manages all the Device Plugins running on a node.
 type Manager interface {
 // Start starts device plugin registration service.
-Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.String) error
+Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, initialContainers containermap.ContainerMap, initialContainerRunningSet sets.Set[string]) error
 // Allocate configures and assigns devices to a container in a pod. From
 // the requested device resources, Allocate will communicate with the


@@ -52,14 +52,14 @@ func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.Res
 return ret
 }
-func buildContainerMapAndRunningSetFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) (containermap.ContainerMap, sets.String) {
+func buildContainerMapAndRunningSetFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) (containermap.ContainerMap, sets.Set[string]) {
 podSandboxMap := make(map[string]string)
 podSandboxList, _ := runtimeService.ListPodSandbox(ctx, nil)
 for _, p := range podSandboxList {
 podSandboxMap[p.Id] = p.Metadata.Uid
 }
-runningSet := sets.NewString()
+runningSet := sets.New[string]()
 containerMap := containermap.NewContainerMap()
 containerList, _ := runtimeService.ListContainers(ctx, nil)
 for _, c := range containerList {
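For callers of this helper, only the returned type changes: it is now a `sets.Set[string]`. A minimal sketch of consuming such a running set follows; the container IDs are invented and this is illustration only, not kubelet code.

```go
// Sketch: membership checks on a sets.Set[string] are unchanged by the move to generics.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	runningSet := sets.New[string]("containerd://abc123", "containerd://def456")

	for _, id := range []string{"containerd://abc123", "containerd://missing"} {
		fmt.Printf("%s running: %v\n", id, runningSet.Has(id))
	}
}
```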


@@ -23,7 +23,8 @@ import (
 "strconv"
 "strings"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
+"k8s.io/apimachinery/pkg/util/sets"
 runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 )
@@ -39,7 +40,7 @@ func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, contain
 numaNodes := i.memoryManager.GetMemoryNUMANodes(pod, container)
 if numaNodes.Len() > 0 {
 var affinity []string
-for _, numaNode := range numaNodes.List() {
+for _, numaNode := range sets.List(numaNodes) {
 affinity = append(affinity, strconv.Itoa(numaNode))
 }
 containerConfig.Linux.Resources.CpusetMems = strings.Join(affinity, ",")
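The switch from the `numaNodes.List()` method to the package-level `sets.List(numaNodes)` preserves the property this code depends on: the returned slice is sorted, so the generated `cpuset.mems` string is deterministic regardless of insertion order. A standalone sketch of that pattern, with made-up NUMA node IDs and no kubelet types:

```go
// Sketch: sets.List returns a sorted slice, giving a stable comma-separated string.
package main

import (
	"fmt"
	"strconv"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	numaNodes := sets.New[int](3, 0, 1)

	var affinity []string
	for _, numaNode := range sets.List(numaNodes) { // sorted: 0, 1, 3
		affinity = append(affinity, strconv.Itoa(numaNode))
	}
	fmt.Println(strings.Join(affinity, ",")) // "0,1,3"
}
```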


@@ -50,7 +50,7 @@ func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, contain
 klog.InfoS("Add container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID)
 }
-func (m *fakeManager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int {
+func (m *fakeManager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] {
 klog.InfoS("Get MemoryNUMANodes", "pod", klog.KObj(pod), "containerName", container.Name)
 return nil
 }


@@ -83,7 +83,7 @@ type Manager interface {
 GetPodTopologyHints(*v1.Pod) map[string][]topologymanager.TopologyHint
 // GetMemoryNUMANodes provides NUMA nodes that are used to allocate the container memory
-GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int
+GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int]
 // GetAllocatableMemory returns the amount of allocatable memory for each NUMA node
 GetAllocatableMemory() []state.Block
@@ -213,9 +213,9 @@ func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID
 }
 // GetMemoryNUMANodes provides NUMA nodes that used to allocate the container memory
-func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Int {
+func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.Set[int] {
 // Get NUMA node affinity of blocks assigned to the container during Allocate()
-numaNodes := sets.NewInt()
+numaNodes := sets.New[int]()
 for _, block := range m.state.GetMemoryBlocks(string(pod.UID), container.Name) {
 for _, nodeID := range block.NUMAAffinity {
 // avoid nodes duplication when hugepages and memory blocks pinned to the same NUMA node
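The deduplication the comment above relies on carries over to the generic set: inserting the same NUMA node ID more than once (for example once for a memory block and once for a hugepages block) still leaves a single entry. A minimal illustration, with made-up node IDs and no memory-manager state:

```go
// Sketch: a sets.Set[int] keeps each NUMA node ID once, however many blocks reference it.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	numaNodes := sets.New[int]()

	// Blocks pinned to the same NUMA node insert duplicate IDs...
	for _, nodeID := range []int{0, 1, 0, 1} {
		numaNodes.Insert(nodeID)
	}

	// ...but the set keeps each node once.
	fmt.Println(numaNodes.Len(), sets.List(numaNodes)) // 2 [0 1]
}
```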


@@ -30,11 +30,11 @@ const (
 )
 var (
-alphaOptions = sets.NewString()
-betaOptions = sets.NewString(
+alphaOptions = sets.New[string]()
+betaOptions = sets.New[string](
 PreferClosestNUMANodes,
 )
-stableOptions = sets.NewString()
+stableOptions = sets.New[string]()
 )
 func CheckPolicyOptionAvailable(option string) error {
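For context, here is a hedged sketch of how tiered option sets like these can be queried with the generic API. This is not the actual `CheckPolicyOptionAvailable` implementation: the option value is an illustrative assumption and the kubelet's feature-gate handling is omitted.

```go
// Sketch: checking whether an option name appears in any of the tiered sets.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// Illustrative value only; not necessarily the kubelet's PreferClosestNUMANodes constant.
const preferClosestNUMANodes = "prefer-closest-numa-nodes"

var (
	alphaOptions  = sets.New[string]()
	betaOptions   = sets.New[string](preferClosestNUMANodes)
	stableOptions = sets.New[string]()
)

func isKnownOption(option string) bool {
	return alphaOptions.Has(option) || betaOptions.Has(option) || stableOptions.Has(option)
}

func main() {
	fmt.Println(isKnownOption(preferClosestNUMANodes)) // true
	fmt.Println(isKnownOption("no-such-option"))       // false
}
```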


@@ -112,7 +112,7 @@ func TestNewTopologyManagerOptions(t *testing.T) {
 }
 betaOptions.Insert(fancyBetaOption)
-alphaOptions = sets.NewString(fancyAlphaOption)
+alphaOptions = sets.New[string](fancyAlphaOption)
 for _, tcase := range testCases {
 t.Run(tcase.description, func(t *testing.T) {